def test_generator_methods(self):
    model = testing_utils.get_small_mlp(10, 4, 3)
    optimizer = rmsprop.RMSprop(learning_rate=0.001)
    model.compile(
        optimizer,
        loss='mse',
        metrics=['mae', metrics_module.CategoricalAccuracy()],
        run_eagerly=True)

    x = np.random.random((10, 3))
    y = np.random.random((10, 4))

    def numpy_iterator():
      while True:
        yield x, y

    model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1)
    model.evaluate_generator(numpy_iterator(), steps=3)

    def inference_numpy_iterator():
      while True:
        yield x

    out = model.predict_generator(inference_numpy_iterator(), steps=3)
    self.assertEqual(out.shape, (30, 4))
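
# `testing_utils.get_small_mlp` is a TensorFlow-internal test helper that is
# not shown on this page. A minimal sketch of what it plausibly builds, using
# the (num_hidden, num_classes, input_dim) signature seen in these examples;
# the exact layer configuration is an assumption, not the real implementation:
def get_small_mlp_sketch(num_hidden, num_classes, input_dim=None):
  model = keras.models.Sequential()
  if input_dim is not None:
    # A fixed input_dim lets the model be built with a known input shape.
    model.add(keras.layers.Dense(num_hidden, activation='relu',
                                 input_dim=input_dim))
  else:
    model.add(keras.layers.Dense(num_hidden, activation='relu'))
  model.add(keras.layers.Dense(num_classes, activation='softmax'))
  return model
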
  def test_finite_dataset_unknown_cardinality_out_of_data(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile('rmsprop', 'mse',
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(dataset)),
        cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    with test.mock.patch.object(logging, 'warning') as mock_log:
      # steps_per_epoch (200) is far more than the 10 batches the dataset can
      # produce (100 samples batched by 10), so training runs out of data in
      # the first epoch and never reaches the second.
      history = model.fit(
          dataset,
          epochs=2,
          verbose=1,
          callbacks=[batch_counter],
          steps_per_epoch=200)
      self.assertIn(
          'Your dataset ran out of data; interrupting training. '
          'Make sure that your dataset can generate at least '
          '`steps_per_epoch * epochs` batches (in this case, 400 batches). '
          'You may need to use the repeat() function when '
          'building your dataset.', str(mock_log.call_args))

    self.assertLen(history.history['loss'], 1)
    self.assertEqual(batch_counter.batch_count, 10)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)
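
# `BatchCounterCallback` is defined elsewhere in the original test file. A
# minimal sketch consistent with how it is used in these examples; most
# examples read `batch_count` while one reads `batch_end_count`, so both
# attributes are tracked here (the names are inferred from usage, not copied
# from the real source):
class BatchCounterCallback(keras.callbacks.Callback):

  def __init__(self):
    self.batch_count = 0
    self.batch_end_count = 0

  def on_batch_end(self, batch, logs=None):
    # Count every training batch that completes.
    self.batch_count += 1
    self.batch_end_count += 1
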
  def test_model_fit_and_validation_with_missing_arg_errors(self):
    model = testing_utils.get_small_mlp(10, 4, 3)
    model.compile(optimizer=rmsprop.RMSprop(learning_rate=0.001),
                  loss='mse',
                  run_eagerly=True)

    x = array_ops.zeros(shape=(10, 3))
    y = array_ops.zeros(shape=(10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat(10).batch(5)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    validation_dataset = dataset_ops.Dataset.from_tensor_slices(
        (x, y)).repeat().batch(5)  # Infinite dataset.
    validation_iterator = dataset_ops.make_one_shot_iterator(validation_dataset)

    with self.assertRaisesRegexp(
        ValueError, r'specify .* `steps_per_epoch`'):
      model.fit(iterator, epochs=1, verbose=0)
    if not context.executing_eagerly():
      # In eager execution, `array_ops.zeros` returns value tensors
      # which can be used for validation without a `validation_steps` argument.
      with self.assertRaisesRegexp(
          ValueError, r'provide either `batch_size` or `validation_steps`'):
        model.fit(iterator, steps_per_epoch=2, epochs=1, verbose=0,
                  validation_data=(x, y))
    # Step argument is required for infinite datasets.
    with self.assertRaisesRegexp(ValueError,
                                 'specify the `validation_steps` argument.'):
      model.fit(iterator, steps_per_epoch=2, epochs=1, verbose=0,
                validation_data=validation_dataset)
    with self.assertRaisesRegexp(ValueError,
                                 'specify the `validation_steps` argument.'):
      model.fit(iterator, steps_per_epoch=2, epochs=1, verbose=0,
                validation_data=validation_iterator)

  def test_model_methods_with_eager_tensors_single_io(self):
    if not context.executing_eagerly():
      # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
      # symbolic tensors has different requirements.
      return

    model = testing_utils.get_small_mlp(10, 4, 3)

    optimizer = rmsprop.RMSprop(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly())

    inputs = array_ops.zeros(shape=(10, 3))
    targets = array_ops.zeros(shape=(10, 4))

    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
    model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
    model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
              validation_data=(inputs, targets))
    model.evaluate(inputs, targets, batch_size=2, verbose=0)
    model.predict(inputs, batch_size=2)
    model.train_on_batch(inputs, targets)
    model.test_on_batch(inputs, targets)

  def test_training_and_eval_methods_on_iterators_single_io(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(optimizer, loss, metrics=metrics,
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)

    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(iterator, steps=2, verbose=1)
    model.predict(iterator, steps=2)

    # Test with validation data
    model.fit(iterator,
              epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=iterator, validation_steps=2)
    # Test with validation split
    with self.assertRaisesRegexp(
        ValueError, '`validation_split` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(iterator,
                epochs=1, steps_per_epoch=2, verbose=0,
                validation_split=0.5, validation_steps=2)

    # Test with sample weight.
    sample_weight = np.random.random((10,))
    with self.assertRaisesRegexp(
        ValueError, '`sample_weight` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(
          iterator,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          sample_weight=sample_weight)

    # Test invalid usage
    with self.assertRaisesRegexp(ValueError,
                                 'you should not specify a target'):
      model.fit(iterator, iterator,
                epochs=1, steps_per_epoch=2, verbose=0)

    with self.assertRaisesRegexp(
        ValueError, 'the `steps_per_epoch` argument'):
      model.fit(iterator, epochs=1, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'the `steps` argument'):
      model.evaluate(iterator, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'the `steps` argument'):
      model.predict(iterator, verbose=0)

  def test_trace_model_outputs_after_fitting(self):
    input_dim = 5 if testing_utils.get_model_type() == 'functional' else None
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    model.compile(optimizer='sgd', loss='mse')
    model.fit(x=np.random.random((8, 5)),
              y=np.random.random((8, 3)), epochs=2)

    inputs = array_ops.ones((8, 5))

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn(inputs)
    expected_outputs = {model.output_names[0]: model(inputs)}

    self._assert_all_close(expected_outputs, signature_outputs)

  def test_model_save(self):
    input_dim = 5
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    inputs = array_ops.ones((8, 5))

    if testing_utils.get_model_type() == 'subclass':
      model._set_inputs(inputs)

    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save_lib.save(model, save_dir)

    self.assertAllClose(
        {model.output_names[0]: model.predict_on_batch(inputs)},
        _import_and_infer(save_dir, {model.input_names[0]: np.ones((8, 5))}))
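
# `_import_and_infer` is a saved_model test helper, not shown here. A sketch
# under the assumption that it reloads the SavedModel and feeds the inputs to
# its default serving signature (public-API names used for clarity):
def _import_and_infer_sketch(save_dir, inputs):
  import tensorflow as tf
  loaded = tf.saved_model.load(save_dir)
  infer = loaded.signatures['serving_default']
  outputs = infer(**{name: tf.constant(value)
                     for name, value in inputs.items()})
  return {name: value.numpy() for name, value in outputs.items()}
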
  def test_dataset_with_sparse_labels(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    model.compile(
        optimizer,
        loss='sparse_categorical_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((10, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=10, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  def test_finite_dataset_known_cardinality_no_steps_arg(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    model.compile(optimizer, 'mse',
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.batch(10)

    history = model.fit(dataset, epochs=2, verbose=1)
    self.assertEqual(len(history.history['loss']), 2)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)

  def test_trace_model_outputs(self):
    input_dim = 5 if testing_utils.get_model_type() == 'functional' else None
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    inputs = array_ops.ones((8, 5))

    if input_dim is None:
      with self.assertRaisesRegexp(ValueError,
                                   'input shapes have not been set'):
        saving_utils.trace_model_call(model)
      model._set_inputs(inputs)

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn(inputs)
    expected_outputs = {model.output_names[0]: model(inputs)}

    self._assert_all_close(expected_outputs, signature_outputs)

  def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self):

    class CaptureStdout(object):

      def __enter__(self):
        self._stdout = sys.stdout
        string_io = six.StringIO()
        sys.stdout = string_io
        self._stringio = string_io
        return self

      def __exit__(self, *args):
        self.output = self._stringio.getvalue()
        sys.stdout = self._stdout

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(
        'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(dataset)),
        cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    with CaptureStdout() as capture:
      history = model.fit(
          dataset,
          epochs=2,
          callbacks=[batch_counter],
          validation_data=dataset.take(3))

    lines = capture.output.splitlines()

    self.assertIn('1/Unknown', lines[2])
    self.assertIn('10/10', lines[-1])

    self.assertLen(history.history['loss'], 2)
    self.assertEqual(batch_counter.batch_count, 20)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)
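
# Aside: the hand-rolled CaptureStdout above supports Python 2 via six; on
# Python 3 the same capture is a one-liner with the standard library. Shown as
# an equivalent alternative, not what the original test uses:
def capture_stdout_sketch():
  import contextlib
  import io
  buf = io.StringIO()
  with contextlib.redirect_stdout(buf):
    print('progress output goes here')  # Anything printed here is captured.
  return buf.getvalue()
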
  def test_dataset_with_sample_weights(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(optimizer, loss, metrics=metrics,
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    sample_weights = np.ones((10), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
                                                      sample_weights))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)

  def test_iterators_running_out_of_data(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics,
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(2)
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)

    with test.mock.patch.object(logging, 'warning') as mock_log:
      model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
      self.assertRegexpMatches(
          str(mock_log.call_args),
          'dataset iterator ran out of data')

  def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile('rmsprop', 'mse',
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(keras.backend.get_value(cardinality.cardinality(dataset)),
                     cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])

    self.assertLen(history.history['loss'], 2)
    self.assertEqual(batch_counter.batch_count, 20)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)
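
# For reference, the unknown-cardinality behavior exercised above is easy to
# reproduce with the public tf.data API: `filter()` hides the element count
# from the runtime, so cardinality stays unknown until the data is iterated.
# A standalone sketch with public names (the internal `cardinality` module
# used above is the same machinery):
def unknown_cardinality_sketch():
  import tensorflow as tf
  ds = tf.data.Dataset.range(100).filter(lambda x: True).batch(10)
  return (int(tf.data.experimental.cardinality(ds)) ==
          tf.data.experimental.UNKNOWN_CARDINALITY)  # True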

  def test_dataset_with_sample_weights(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    sample_weights = np.ones((10), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
                                                      sample_weights))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)

    def test_calling_model_on_same_dataset(self):
        if ((not testing_utils.should_run_eagerly())
                and testing_utils.get_model_type() == 'subclass'
                and context.executing_eagerly()
                and (not testing_utils.should_run_tf_function())):
            self.skipTest('b/120673224')

        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.should_run_tf_function())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        # Call fit with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)

    def test_calling_model_on_same_dataset(self):
        if ((not testing_utils.should_run_eagerly())
                and testing_utils.get_model_type() == 'subclass'
                and context.executing_eagerly()):
            self.skipTest('b/120673224')

        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        # Call fit with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)
        # Finalize the graph to make sure new ops aren't added when calling on the
        # same dataset
        ops.get_default_graph().finalize()
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)

    def test_predict_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer='sgd',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())

        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
        # Test generator with just inputs (no targets)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
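
# `custom_generator` is another helper from the original test file. A sketch
# inferred from how it is called here: by default it yields (inputs, targets)
# batches forever, and `mode=1` yields inputs only (matching the "just inputs,
# no targets" comment above). The batch size and the encoding of `mode` are
# assumptions:
def custom_generator_sketch(mode=2):
  batch_size = 10
  while True:
    x = np.random.random((batch_size, 2))  # input_dim=2 in these examples.
    y = np.random.random((batch_size, 4))  # num_classes=4.
    if mode == 1:
      yield x  # Inputs only, e.g. for predict_generator.
    else:
      yield x, y
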
    def test_finite_dataset_unknown_cardinality_out_of_data(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = np.zeros((100, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=100, dtype=np.int32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.filter(lambda x, y: True).batch(10)
        self.assertEqual(
            keras.backend.get_value(cardinality.cardinality(dataset)),
            cardinality.UNKNOWN)

        batch_counter = BatchCounterCallback()
        with test.mock.patch.object(logging, 'warning') as mock_log:
            # steps_per_epoch (200) is far more than the 10 batches the
            # dataset can produce, so training runs out of data in the first
            # epoch and never reaches the second.
            history = model.fit(dataset,
                                epochs=2,
                                verbose=1,
                                callbacks=[batch_counter],
                                steps_per_epoch=200)
            self.assertIn('ran out of data; interrupting training.',
                          str(mock_log.call_args))
            self.assertIn(
                'can generate at least '
                '`steps_per_epoch * epochs` batches (in this case, 400 batches). '
                'You may need to use the repeat() function when '
                'building your dataset.', str(mock_log.call_args))

        self.assertLen(history.history['loss'], 1)
        self.assertEqual(batch_counter.batch_count, 10)
        model.evaluate(dataset)
        out = model.predict(dataset)
        self.assertEqual(out.shape[0], 100)

    def test_model_methods_with_eager_tensors_single_io(self):
        if not context.executing_eagerly():
            # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
            # symbolic tensors has different requirements.
            return

        model = testing_utils.get_small_mlp(10, 4, 3)

        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = array_ops.zeros(shape=(10, 3))
        targets = array_ops.zeros(shape=(10, 4))

        model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=3,
                  verbose=0,
                  shuffle=False)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=4,
                  verbose=0,
                  validation_data=(inputs, targets))
        model.evaluate(inputs, targets, batch_size=2, verbose=0)
        model.predict(inputs, batch_size=2)
        model.train_on_batch(inputs, targets)
        model.test_on_batch(inputs, targets)

  def test_generator_methods_invalid_use_case(self):

    def invalid_generator():
      while True:
        yield 0

    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3),
                  run_eagerly=testing_utils.should_run_eagerly())

    with self.assertRaises(ValueError):
      model.fit_generator(invalid_generator(),
                          steps_per_epoch=5,
                          epochs=1,
                          verbose=1,
                          max_queue_size=10,
                          use_multiprocessing=False)
    with self.assertRaises(ValueError):
      model.fit_generator(custom_generator(),
                          steps_per_epoch=5,
                          epochs=1,
                          verbose=1,
                          max_queue_size=10,
                          use_multiprocessing=False,
                          validation_data=invalid_generator(),
                          validation_steps=10)
    with self.assertRaises(AttributeError):
      model.predict_generator(invalid_generator(),
                              steps=5,
                              max_queue_size=10,
                              use_multiprocessing=False)
    with self.assertRaises(ValueError):
      model.evaluate_generator(invalid_generator(),
                               steps=5,
                               max_queue_size=10,
                               use_multiprocessing=False)

    def test_model_save_and_load(self):
        input_arr = np.random.random((1, 3)).astype(np.float32)
        target_arr = np.random.random((1, 4)).astype(np.float32)

        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.layers[-1].activity_regularizer = regularizers.get('l2')
        model.activity_regularizer = regularizers.get('l2')
        model.compile(loss='mse', optimizer='rmsprop')
        model.train_on_batch(input_arr, target_arr)

        def callable_loss():
            return math_ops.reduce_sum(model.weights[0])

        model.add_loss(callable_loss)
        saved_model_dir = self._save_model_dir()
        tf_save.save(model, saved_model_dir)

        loaded = saved_model_load.load(saved_model_dir)
        self.evaluate(variables.variables_initializer(loaded.variables))
        self.assertAllClose(self.evaluate(model.weights),
                            self.evaluate(loaded.weights))

        input_arr = constant_op.constant(
            np.random.random((1, 3)).astype(np.float32))
        self.assertAllClose(self.evaluate(model(input_arr)),
                            self.evaluate(loaded(input_arr)))
        # Validate losses. The order of conditional losses may change between the
        # model and loaded model, so sort the losses first.
        if context.executing_eagerly():
            self.assertAllClose(sorted(self.evaluate(model.losses)),
                                sorted(self.evaluate(loaded.losses)))
        else:
            self.assertAllClose(self.evaluate(model.get_losses_for(None)),
                                self.evaluate(loaded.get_losses_for(None)))
            self.assertAllClose(
                sorted(self.evaluate(model.get_losses_for(input_arr))),
                sorted(self.evaluate(loaded.get_losses_for(input_arr))))

    def test_finite_dataset_known_cardinality_no_steps_arg(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.should_run_tf_function())

        inputs = np.zeros((100, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=100, dtype=np.int32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.batch(10)

        batch_counter = BatchCounterCallback()
        history = model.fit(dataset,
                            epochs=2,
                            verbose=1,
                            callbacks=[batch_counter])

        self.assertLen(history.history['loss'], 2)
        self.assertEqual(batch_counter.batch_end_count, 20)
        model.evaluate(dataset)
        out = model.predict(dataset)
        self.assertEqual(out.shape[0], 100)

  def test_calling_model_on_same_dataset(self):
    if ((not testing_utils.should_run_eagerly())
        and testing_utils.get_model_type() == 'subclass'
        and context.executing_eagerly()):
      self.skipTest('b/120673224')

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics,
                  run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    # Call fit with validation data
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)

  def test_evaluate_generator_method(self):
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        metrics=['mae', metrics_module.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())

    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             workers=2,
                             verbose=1,
                             use_multiprocessing=True)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False,
                             workers=0)

  def test_generator_input_to_fit_eval_predict(self):
    val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

    def ones_generator():
      while True:
        yield np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

    model = testing_utils.get_small_mlp(
        num_hidden=10, num_classes=1, input_dim=10)

    model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy',
                  run_eagerly=testing_utils.should_run_eagerly())
    model.fit(
        ones_generator(),
        steps_per_epoch=2,
        validation_data=val_data,
        epochs=2)
    model.evaluate(ones_generator(), steps=2)
    model.predict(ones_generator(), steps=2)

  def test_generator_input_to_fit_eval_predict(self):
    if testing_utils.should_run_distributed():
      self.skipTest('b/137397816')
    val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

    def ones_generator():
      while True:
        yield np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

    model = testing_utils.get_small_mlp(
        num_hidden=10, num_classes=1, input_dim=10)

    model.compile(
        rmsprop.RMSprop(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        run_distributed=testing_utils.should_run_distributed())
    model.fit(
        ones_generator(),
        steps_per_epoch=2,
        validation_data=val_data,
        epochs=2)
    model.evaluate(ones_generator(), steps=2)
    model.predict(ones_generator(), steps=2)

    def test_predict_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.run_eagerly = testing_utils.should_run_eagerly()
        model._experimental_run_tf_function = testing_utils.should_run_tf_function()

        self._sleep_at_end = True
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
        # Test generator with just inputs (no targets)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=0)

    def test_fit_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        self._sleep_at_end = True
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            workers=4,
                            use_multiprocessing=True)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(),
                            validation_steps=10)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            workers=0)

  def testBody(self, with_brackets):
    with_brackets = "with_brackets" if with_brackets else "without_brackets"
    model_types.append(
        (with_brackets, testing_utils.get_model_type()))
    models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))

  def testBody(self):
    model_types.append(testing_utils.get_model_type())
    models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
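
# The `testBody` snippets above append to module-level `model_types` and
# `models` lists that are not shown on this page; a plausible harness is just
# two accumulators inspected by the surrounding parameterized-combination
# test (this is an assumption about the omitted context):
model_types = []
models = []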

  def test_predict_generator_method(self):
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.run_eagerly = testing_utils.should_run_eagerly()

    self._sleep_at_end = True
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            workers=2,
                            use_multiprocessing=True)
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            use_multiprocessing=False)
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            workers=0)
    # Test generator with just inputs (no targets)
    model.predict_generator(custom_generator(mode=1),
                            steps=5,
                            max_queue_size=10,
                            workers=4,
                            use_multiprocessing=True)

    def test_training_and_eval_methods_on_iterators_single_io(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(iterator, steps=2, verbose=1)
        model.predict(iterator, steps=2)

        # Test with validation data
        model.fit(iterator,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=iterator,
                  validation_steps=2)
        # Test with validation split
        with self.assertRaisesRegexp(
                ValueError, '`validation_split` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError, '`sample_weight` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        # Test invalid usage
        with self.assertRaisesRegexp(ValueError,
                                     'you should not specify a target'):
            model.fit(iterator,
                      iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0)

        with self.assertRaisesRegexp(ValueError,
                                     'the `steps_per_epoch` argument'):
            model.fit(iterator, epochs=1, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.evaluate(iterator, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.predict(iterator, verbose=0)

    def test_training_and_eval_methods_on_dataset(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.should_run_tf_function())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat()  # Infinite dataset.
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)

        # Test with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)

        # Test with validation split
        with self.assertRaisesRegexp(
                ValueError,
                '`validation_split` argument is not supported when '):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError,
                r'`sample_weight` argument is not supported .+dataset'):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        # Test invalid usage
        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument must not be specified'):
            model.fit(dataset,
                      batch_size=10,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0)

        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument must not be specified'):
            model.predict(dataset, batch_size=10, steps=2, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument must not be specified'):
            model.evaluate(dataset, batch_size=10, steps=2, verbose=0)

        with self.assertRaisesRegexp(
                ValueError, '(you should not specify a target)|'
                '(`y` argument is not supported when using dataset as input.)'
        ):
            model.fit(dataset, dataset, epochs=1, steps_per_epoch=2, verbose=0)

        # With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
        with self.assertRaisesRegexp(ValueError,
                                     'the `steps_per_epoch` argument'):
            model.fit(dataset, epochs=1, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.evaluate(dataset, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.predict(dataset, verbose=0)