Code example #1
    def test_generator_input_to_fit_eval_predict(self):
        val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

        def ones_generator():
            while True:
                yield np.ones([10, 10], np.float32), np.ones([10, 1],
                                                             np.float32)

        model = testing_utils.get_small_mlp(num_hidden=10,
                                            num_classes=1,
                                            input_dim=10)

        model.compile(rmsprop.RMSprop(0.001),
                      'binary_crossentropy',
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(ones_generator(),
                  steps_per_epoch=2,
                  validation_data=val_data,
                  epochs=2)
        model.evaluate(ones_generator(), steps=2)
        model.predict(ones_generator(), steps=2)

        # Test with a changing batch size
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])
        model.fit_generator(custom_generator_changing_batch_size(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(
            custom_generator_changing_batch_size(),
            steps_per_epoch=5,
            epochs=1,
            verbose=1,
            max_queue_size=10,
            use_multiprocessing=False,
            validation_data=custom_generator_changing_batch_size(),
            validation_steps=10)

        model.fit(custom_generator_changing_batch_size(),
                  steps_per_epoch=5,
                  validation_data=custom_generator_changing_batch_size(),
                  validation_steps=10,
                  epochs=2)
        model.evaluate(custom_generator_changing_batch_size(), steps=5)
        model.predict(custom_generator_changing_batch_size(), steps=5)
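
The helper `custom_generator_changing_batch_size()` used above is not part of this excerpt. A minimal sketch that matches how it is called here (2 input features, 4 targets, a batch dimension that changes from step to step) might look like the following; the actual keras test helper may differ.

import numpy as np

def custom_generator_changing_batch_size():
    # Hypothetical stand-in: yields (x, y) batches whose batch size cycles
    # 10, 9, ..., 1, 10, ... so fit/evaluate/predict must cope with a
    # non-constant batch dimension.
    batch_size = 10
    while True:
        x = np.random.random((batch_size, 2)).astype(np.float32)
        y = np.random.random((batch_size, 4)).astype(np.float32)
        yield x, y
        batch_size = batch_size - 1 or 10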
Code example #2
  def test_standard_loader(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.activity_regularizer = regularizers.get('l2')
    def eager_loss():
      return tf.reduce_sum(model.weights[0])
    model.add_loss(eager_loss)

    # Call predict to ensure that all layers are built and inputs are set.
    model.predict(np.random.random((1, 3)).astype(np.float32))
    saved_model_dir = self._save_model_dir()

    model.save(saved_model_dir, save_format='tf')

    loaded = tf.saved_model.load(saved_model_dir)
    self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
    all_close = ['variables', 'trainable_variables',
                 'non_trainable_variables']
    for attr in all_close:
      self.assertAllClose(self.evaluate(getattr(model, attr)),
                          self.evaluate(getattr(loaded.keras_api, attr)))
    self.assertLen(loaded.regularization_losses, 1)
    expected_layers = len(model.layers)
    self.assertEqual(expected_layers, len(loaded.keras_api.layers))
    input_arr = tf.ones((4, 3))
    self.assertAllClose(self.evaluate(model(input_arr)),
                        self.evaluate(loaded(input_arr, training=False)))
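
Every example on this page builds its model with `testing_utils.get_small_mlp(num_hidden, num_classes, input_dim)`, whose definition is not shown. As a rough sketch (assuming the sequential model type; the real helper also covers functional and subclassed variants), an equivalent model is:

from tensorflow import keras

def get_small_mlp_sketch(num_hidden, num_classes, input_dim):
    # Approximate stand-in for testing_utils.get_small_mlp (assumed, not the
    # actual keras test helper).
    activation = 'sigmoid' if num_classes == 1 else 'softmax'
    return keras.Sequential([
        keras.layers.Dense(num_hidden, activation='relu',
                           input_shape=(input_dim,)),
        keras.layers.Dense(num_classes, activation=activation),
    ])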
Code example #3
File: training_eager_test.py Project: zoahib/keras
    def test_generator_methods(self):
        model = testing_utils.get_small_mlp(10, 4, 3)
        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        model.compile(optimizer,
                      loss='mse',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=True)

        x = np.random.random((10, 3))
        y = np.random.random((10, 4))

        def numpy_iterator():
            while True:
                yield x, y

        model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1)
        model.evaluate_generator(numpy_iterator(), steps=3)

        def inference_numpy_iterator():
            while True:
                yield x

        out = model.predict_generator(inference_numpy_iterator(), steps=3)
        self.assertEqual(out.shape, (30, 4))
Code example #4
File: training_eager_test.py Project: zoahib/keras
    def test_model_fit_and_validation_with_missing_arg_errors(self):
        model = testing_utils.get_small_mlp(10, 4, 3)
        model.compile(optimizer=rmsprop.RMSprop(learning_rate=0.001),
                      loss='mse',
                      run_eagerly=True)

        x = tf.zeros(shape=(10, 3))
        y = tf.zeros(shape=(10, 4))
        dataset = tf.data.Dataset.from_tensor_slices(
            (x, y)).repeat(10).batch(5)
        validation_dataset = tf.data.Dataset.from_tensor_slices(
            (x, y)).repeat().batch(5)  # Infinite dataset.

        model.fit(dataset, epochs=1, verbose=0)

        # A steps argument is required for infinite datasets: here the
        # infinite `validation_data` is passed without `validation_steps`.
        with self.assertRaises(ValueError):
            model.fit(dataset,
                      steps_per_epoch=2,
                      epochs=1,
                      verbose=0,
                      validation_data=validation_dataset)
        with self.assertRaises(ValueError):
            model.fit(dataset,
                      steps_per_epoch=2,
                      epochs=1,
                      verbose=0,
                      validation_data=validation_dataset)
Code example #5
    def test_fit_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        model.fit_generator(custom_generator_threads(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            workers=4,
                            use_multiprocessing=True)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(),
                            validation_steps=10)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            workers=0)
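
The generators `custom_generator()` and `custom_generator_threads()` referenced here (and in several later examples) are also not shown. Judging from how they are used, `mode=1` yields inputs only, the default yields `(x, y)` pairs, and `mode=3` adds per-sample weights, while the threaded variant guards iteration with a lock so multiple workers can share it. A plausible sketch, not the actual keras helpers:

import threading
import numpy as np

def custom_generator(mode=2):
    # Assumed behaviour: mode=1 -> x only, mode=2 -> (x, y),
    # any other mode (e.g. 3) -> (x, y, sample_weights).
    batch_size, num_samples = 10, 50
    data = np.random.random((num_samples, 2)).astype(np.float32)
    labels = np.random.random((num_samples, 4)).astype(np.float32)
    weights = np.random.random((num_samples,)).astype(np.float32)
    i = 0
    while True:
        start = (i * batch_size) % num_samples
        end = start + batch_size
        i += 1
        if mode == 1:
            yield data[start:end]
        elif mode == 2:
            yield data[start:end], labels[start:end]
        else:
            yield data[start:end], labels[start:end], weights[start:end]

class custom_generator_threads:
    # Thread-safe wrapper (assumed): __next__ is serialized with a lock so
    # several queue workers can pull batches from the same iterator.
    def __init__(self, mode=2):
        self._lock = threading.Lock()
        self._generator = custom_generator(mode)

    def __iter__(self):
        return self

    def __next__(self):
        with self._lock:
            return next(self._generator)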
Code example #6
  def test_compiled_model(self):
    # TODO(b/134519980): Issue with model.fit if the model call function uses
    # a tf.function (Graph mode only).
    if not tf.executing_eagerly():
      return

    input_arr = np.random.random((1, 3))
    target_arr = np.random.random((1, 4))

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    expected_predict = model.predict(input_arr)

    # Compile and save model.
    model.compile('rmsprop', 'mse')
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    loaded = keras_load.load(saved_model_dir)
    actual_predict = loaded.predict(input_arr)
    self.assertAllClose(expected_predict, actual_predict)

    loss_before = loaded.evaluate(input_arr, target_arr)
    loaded.fit(input_arr, target_arr)
    loss_after = loaded.evaluate(input_arr, target_arr)
    self.assertLess(loss_after, loss_before)
    predict = loaded.predict(input_arr)

    ckpt_path = os.path.join(self.get_temp_dir(), 'weights')
    loaded.save_weights(ckpt_path)

    # Ensure that the checkpoint is compatible with the original model.
    model.load_weights(ckpt_path)
    self.assertAllClose(predict, model.predict(input_arr))
Code example #7
    def test_training_with_sequences(self):
        class DummySequence(data_utils.Sequence):
            def __getitem__(self, idx):
                return np.zeros([10, 2]), np.ones([10, 4])

            def __len__(self):
                return 10

        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3))

        model.fit_generator(DummySequence(),
                            steps_per_epoch=10,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            max_queue_size=10,
                            workers=0,
                            use_multiprocessing=True)
        model.fit_generator(DummySequence(),
                            steps_per_epoch=10,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            max_queue_size=10,
                            workers=0,
                            use_multiprocessing=False)
Code example #8
    def test_generator_methods_with_sample_weights(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())

        model.fit_generator(custom_generator(mode=3),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(mode=3),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(mode=3),
                            validation_steps=10)
        model.predict_generator(custom_generator(mode=3),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.evaluate_generator(custom_generator(mode=3),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False)
Code example #9
File: saving_utils_test.py Project: zy009197/keras
  def test_model_with_fixed_input_dim(self):
    """Ensure that the batch_dim is removed when saving.

    When serving or retraining, it is important to reset the batch dim.
    This can be an issue inside of tf.function. See b/132783590 for context.
    """
    model = testing_utils.get_small_mlp(10, 3, 5)

    loss_object = keras.losses.MeanSquaredError()
    optimizer = gradient_descent.SGD()

    @tf.function
    def train_step(data, labels):
      with tf.GradientTape() as tape:
        predictions = model(data)
        loss = loss_object(labels, predictions)
      gradients = tape.gradient(loss, model.trainable_variables)
      optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    x = np.random.random((8, 5))
    y = np.random.random((8, 3))

    train_step(x, y)

    fn = saving_utils.trace_model_call(model)
    self.assertEqual(fn.input_signature[0].shape.as_list(),
                     tf.TensorShape([None, 5]).as_list())
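
As a hedged follow-up to the docstring above: because `trace_model_call` records the batch dimension as `None`, the traced function should accept inputs with any batch size. A usage sketch, reusing `model`, `saving_utils`, and `tf` from the example:

# Assumed illustration, not part of the original test.
fn = saving_utils.trace_model_call(model)
out_small = fn(tf.ones((2, 5)))   # batch of 2 matches the [None, 5] signature
out_large = fn(tf.ones((32, 5)))  # so does a batch of 32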
Code example #10
    def test_evaluate_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())

        model.evaluate_generator(custom_generator_threads(),
                                 steps=5,
                                 max_queue_size=10,
                                 workers=2,
                                 verbose=1,
                                 use_multiprocessing=True)
        model.evaluate_generator(custom_generator(),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False)
        model.evaluate_generator(custom_generator(),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False,
                                 workers=0)
Code example #11
    def test_predict_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.run_eagerly = testing_utils.should_run_eagerly()

        model.predict_generator(custom_generator_threads(),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
        # Test generator with just inputs (no targets)
        model.predict_generator(custom_generator_threads(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
Code example #12
    def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((100, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=100, dtype=np.int32)
        dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.filter(lambda x, y: True).batch(10)
        self.assertEqual(
            keras.backend.get_value(tf.data.experimental.cardinality(dataset)),
            tf.data.experimental.UNKNOWN_CARDINALITY)

        batch_counter = BatchCounterCallback()
        history = model.fit(dataset,
                            epochs=2,
                            verbose=1,
                            callbacks=[batch_counter])

        self.assertLen(history.history['loss'], 2)
        self.assertEqual(batch_counter.batch_end_count, 20)
        model.evaluate(dataset)
        out = model.predict(dataset)
        self.assertEqual(out.shape[0], 100)
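
`BatchCounterCallback` is used here and in a later example but is not defined in this excerpt. A plausible definition (assumed), counting batch-begin and batch-end events during `fit`:

from tensorflow import keras

class BatchCounterCallback(keras.callbacks.Callback):
    # Assumed sketch: counts how many batches start and finish during fit().
    def __init__(self):
        super().__init__()
        self.batch_begin_count = 0
        self.batch_end_count = 0

    def on_batch_begin(self, batch, logs=None):
        self.batch_begin_count += 1

    def on_batch_end(self, batch, logs=None):
        self.batch_end_count += 1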
Code example #13
    def test_calling_model_on_same_dataset(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        # Call fit with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)
Code example #14
    def test_built_models(self, serializer):
        """Built models should be copyable and picklable for all model types."""
        if not tf.__internal__.tf2.enabled():
            self.skipTest(
                'pickle model only available in v2 when tf format is used.')
        model = testing_utils.get_small_mlp(num_hidden=1,
                                            num_classes=2,
                                            input_dim=3)
        model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy')

        # train
        x = np.random.random(size=(1000, 3))
        y = np.random.randint(low=0, high=2, size=(1000, ))
        model.fit(x, y)  # builds model
        y1 = model.predict(x)
        # roundtrip with training
        model = serializer(model)
        y2 = model.predict(x)
        # check that the predictions are the same
        self.assertAllClose(y1, y2)
        # and that we can continue training
        model.fit(x, y)
        y3 = model.predict(x)
        # check that the predictions are the same
        self.assertNotAllClose(y2, y3)
Code example #15
  def test_custom_metric_model(self):
    # TODO(b/134519980): Issue with `model.fit` if the model call function uses
    # a `tf.function` in graph mode.
    if not tf.executing_eagerly():
      return

    x = np.random.random((1, 3))
    y = np.random.random((1, 4))

    class CustomMetric(keras.metrics.MeanSquaredError):
      pass

    def zero_metric(y_true, y_pred):
      del y_true, y_pred
      return 0

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(loss='mse', optimizer='SGD',
                  metrics=[CustomMetric(), zero_metric])
    model.fit(x, y)
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')

    with self.assertRaisesRegex(ValueError, 'custom_objects'):
      keras_load.load(saved_model_dir)

    with generic_utils.CustomObjectScope(
        {'CustomMetric': CustomMetric, 'zero_metric': zero_metric}):
      loaded = keras_load.load(saved_model_dir)

    self.evaluate([v.initializer for v in loaded.variables])
    loaded.fit(x, y)
Code example #16
  def _get_model(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.layers[-1].activity_regularizer = regularizers.get('l2')
    model.activity_regularizer = regularizers.get('l2')
    model.compile(
        loss='mse',
        optimizer='rmsprop')
    def callable_loss():
      return tf.reduce_sum(model.weights[0])
    model.add_loss(callable_loss)
    return model
Code example #17
  def test_load_weights_from_saved_model(self):
    save_path = self._save_model_dir()
    save_format = testing_utils.get_save_format()

    if save_format == 'h5' and testing_utils.get_model_type() == 'subclass':
      # TODO(b/173646281): HDF5 format currently does not allow saving
      # subclassed models.
      return

    with self.cached_session():
      model = testing_utils.get_small_mlp(1, 4, input_dim=3)
      data = np.random.random((1, 3))
      labels = np.random.random((1, 4))
      model.compile(loss='mse', optimizer='rmsprop')
      model.fit(data, labels)
      model.save(save_path, save_format=save_format)
      new_model = testing_utils.get_small_mlp(1, 4, input_dim=3)
      if testing_utils.get_model_type() == 'subclass':
        # Call on test data to build the model.
        new_model.predict(data)
      new_model.load_weights(save_path)
      self.assertAllClose(model.weights, new_model.weights)
Code example #18
    def test_unbuilt_models(self, serializer):
        """Unbuilt models should be copyable & deepcopyable for all model types."""
        if not tf.__internal__.tf2.enabled():
            self.skipTest(
                'pickle model only available in v2 when tf format is used.')
        original_model = testing_utils.get_small_mlp(num_hidden=1,
                                                     num_classes=2,
                                                     input_dim=3)
        # roundtrip without compiling or training
        model = serializer(original_model)
        # compile
        model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy')
        # roundtrip compiled but not trained
        model = serializer(model)
Code example #19
    def test_dataset_with_sparse_labels(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        model.compile(optimizer,
                      loss='sparse_categorical_crossentropy',
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=10, dtype=np.int32)
        dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
Code example #20
def main(_) -> None:
    with testing_utils.model_type_scope('functional'):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.layers[-1].activity_regularizer = regularizers.get('l2')
        model.activity_regularizer = regularizers.get('l2')
        model.compile(loss='mse', optimizer='rmsprop')

        def callable_loss():
            return tf.reduce_sum(model.weights[0])

        model.add_loss(callable_loss)

        print(f'_____Writing saved model to: {FLAGS.output_path}')
        model.save(FLAGS.output_path)
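
`FLAGS` and the script entry point are not shown in this example. A minimal way the surrounding script might wire them up with absl (the flag name is taken from the example above; the default path and help text are assumed):

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('output_path', '/tmp/small_mlp_saved_model',
                    'Directory to write the SavedModel to.')  # default assumed

if __name__ == '__main__':
    app.run(main)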
Code example #21
  def test_custom_metric_model(self):

    class CustomMetric(keras.metrics.MeanSquaredError):
      pass

    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(loss='mse', optimizer='rmsprop', metrics=[CustomMetric()])

    saved_model_dir = self._save_model_dir()
    tf.saved_model.save(model, saved_model_dir)
    with self.assertRaisesRegex(ValueError, 'custom_objects'):
      keras_load.load(saved_model_dir)

    keras_load.load(saved_model_dir, compile=False)
Code example #22
  def _test_save_and_load(self, use_dataset=False):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.layers[-1].activity_regularizer = regularizers.get('l2')
    model.activity_regularizer = regularizers.get('l2')
    model.compile(
        loss='mse',
        optimizer='rmsprop')
    def callable_loss():
      return tf.reduce_sum(model.weights[0])
    model.add_loss(callable_loss)

    x = np.random.random((1, 3))
    y = np.random.random((1, 4))

    if not tf.__internal__.tf2.enabled():
      # The layer autocast behavior only runs when autocast is enabled, so
      # in V1, the numpy inputs still need to be cast to float32.
      x = x.astype(np.float32)
      y = y.astype(np.float32)

    if use_dataset:
      dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(1)
      model.fit(dataset)
    else:
      model.train_on_batch(x, y)

    saved_model_dir = self._save_model_dir()
    tf.saved_model.save(model, saved_model_dir)
    loaded = keras_load.load(saved_model_dir)
    self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
    self.assertAllClose(self.evaluate(model.weights),
                        self.evaluate(loaded.weights))

    input_arr = tf.constant(
        np.random.random((1, 3)).astype(np.float32))
    self.assertAllClose(self.evaluate(model(input_arr)),
                        self.evaluate(loaded(input_arr)))
    # Validate losses. The order of conditional losses may change between the
    # model and loaded model, so sort the losses first.
    if tf.executing_eagerly():
      self.assertAllClose(sorted(self.evaluate(model.losses)),
                          sorted(self.evaluate(loaded.losses)))
    else:
      self.assertAllClose(self.evaluate(model.get_losses_for(None)),
                          self.evaluate(loaded.get_losses_for(None)))
      self.assertAllClose(
          sorted(self.evaluate(model.get_losses_for(input_arr))),
          sorted(self.evaluate(loaded.get_losses_for(input_arr))))
Code example #23
File: saved_model_test.py Project: ohsdba/keras
    def test_custom_metric_model(self):
        class CustomMetric(keras.metrics.MeanSquaredError):
            pass

        with self.cached_session():
            metric = CustomMetric()
            model = testing_utils.get_small_mlp(1, 4, input_dim=3)
            model.compile(loss='mse', optimizer='rmsprop', metrics=[metric])
            self.evaluate(tf.compat.v1.global_variables_initializer())
            self.evaluate([v.initializer for v in metric.variables])

            saved_model_dir = self._save_model_dir()
            tf.saved_model.save(model, saved_model_dir)
        with self.assertRaisesRegex(ValueError, 'custom_objects'):
            keras_load.load(saved_model_dir)

        keras_load.load(saved_model_dir, compile=False)
Code example #24
    def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(
            self):
        class CaptureStdout:
            """Context manager that captures everything written to stdout."""
            def __enter__(self):
                self._stdout = sys.stdout
                string_io = io.StringIO()
                sys.stdout = string_io
                self._stringio = string_io
                return self

            def __exit__(self, *args):
                self.output = self._stringio.getvalue()
                sys.stdout = self._stdout

        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((100, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=100, dtype=np.int32)
        dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.filter(lambda x, y: True).batch(10)
        self.assertEqual(
            keras.backend.get_value(tf.data.experimental.cardinality(dataset)),
            tf.data.experimental.UNKNOWN_CARDINALITY)

        batch_counter = BatchCounterCallback()
        io_utils.enable_interactive_logging()
        with CaptureStdout() as capture:
            history = model.fit(dataset,
                                epochs=2,
                                callbacks=[batch_counter],
                                validation_data=dataset.take(3))

        lines = capture.output.splitlines()

        self.assertIn('10/10', lines[-1])

        self.assertLen(history.history['loss'], 2)
        self.assertEqual(batch_counter.batch_begin_count, 21)
        self.assertEqual(batch_counter.batch_end_count, 20)
        model.evaluate(dataset)
        out = model.predict(dataset)
        self.assertEqual(out.shape[0], 100)
Code example #25
File: saving_utils_test.py Project: zy009197/keras
  def test_trace_model_outputs(self):
    input_dim = 5 if testing_utils.get_model_type() == 'functional' else None
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    inputs = tf.ones((8, 5))

    if input_dim is None:
      with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
        saving_utils.trace_model_call(model)
      model._set_inputs(inputs)

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn(inputs)
    if model.output_names:
      expected_outputs = {model.output_names[0]: model(inputs)}
    else:
      expected_outputs = {'output_1': model(inputs)}

    self._assert_all_close(expected_outputs, signature_outputs)
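
Because the traced function carries an explicit input signature, it can also be exported as a serving signature. A hedged usage sketch, reusing `model`, `saving_utils`, and `tf` from the example (the target directory is assumed):

# Assumed illustration, not part of the original test.
fn = saving_utils.trace_model_call(model)
tf.saved_model.save(model, '/tmp/traced_mlp',
                    signatures=fn.get_concrete_function())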
Code example #26
    def test_sequence_input_to_fit_eval_predict(self):
        val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

        class CustomSequence(data_utils.Sequence):
            def __getitem__(self, idx):
                return np.ones([10, 10], np.float32), np.ones([10, 1],
                                                              np.float32)

            def __len__(self):
                return 2

        class CustomSequenceChangingBatchSize(data_utils.Sequence):
            def __getitem__(self, idx):
                batch_size = 10 - idx
                return (np.ones([batch_size, 10], np.float32),
                        np.ones([batch_size, 1], np.float32))

            def __len__(self):
                return 2

        model = testing_utils.get_small_mlp(num_hidden=10,
                                            num_classes=1,
                                            input_dim=10)

        model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')
        model.fit(CustomSequence(), validation_data=val_data, epochs=2)
        model.evaluate(CustomSequence())
        model.predict(CustomSequence())

        with self.assertRaisesRegex(ValueError,
                                    '`y` argument is not supported'):
            model.fit(CustomSequence(), y=np.ones([10, 1]))

        with self.assertRaisesRegex(
                ValueError, '`sample_weight` argument is not supported'):
            model.fit(CustomSequence(), sample_weight=np.ones([10, 1]))

        model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')
        model.fit(CustomSequenceChangingBatchSize(),
                  validation_data=val_data,
                  epochs=2)
        model.evaluate(CustomSequenceChangingBatchSize())
        model.predict(CustomSequenceChangingBatchSize())
Code example #27
File: saving_utils_test.py Project: yule9527/keras
    def test_trace_model_outputs_after_fitting(self):
        input_dim = (5 if testing_utils.get_model_type() == 'functional'
                     else None)
        model = testing_utils.get_small_mlp(10, 3, input_dim)
        model.compile(optimizer='sgd',
                      loss='mse',
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(x=np.random.random((8, 5)).astype(np.float32),
                  y=np.random.random((8, 3)).astype(np.float32),
                  epochs=2)

        inputs = tf.ones((8, 5))

        fn = saving_utils.trace_model_call(model)
        signature_outputs = fn(inputs)
        if model.output_names:
            expected_outputs = {model.output_names[0]: model(inputs)}
        else:
            expected_outputs = {'output_1': model(inputs)}

        self._assert_all_close(expected_outputs, signature_outputs)
Code example #28
    def test_dataset_with_sample_weights(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        sample_weights = np.ones((10), np.float32)
        dataset = tf.data.Dataset.from_tensor_slices(
            (inputs, targets, sample_weights))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)
Code example #29
File: saving_utils_test.py Project: zy009197/keras
  def test_model_save(self):
    input_dim = 5
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    inputs = tf.ones((8, 5))

    if testing_utils.get_model_type() == 'subclass':
      model._set_inputs(inputs)

    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    tf.saved_model.save(model, save_dir)

    if model.output_names:
      output_name = model.output_names[0]
      input_name = model.input_names[0]
    else:
      output_name = 'output_1'
      input_name = 'input_1'

    self.assertAllClose({output_name: model.predict_on_batch(inputs)},
                        _import_and_infer(save_dir,
                                          {input_name: np.ones((8, 5))}))
Code example #30
    def test_generator_methods_invalid_use_case(self):
        def invalid_generator():
            # Yields 4-tuples, which the generator-based fit/evaluate/predict
            # methods should reject with a ValueError.
            while True:
                yield (0, 0, 0, 0)

        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      run_eagerly=testing_utils.should_run_eagerly())

        with self.assertRaises(ValueError):
            model.fit_generator(invalid_generator(),
                                steps_per_epoch=5,
                                epochs=1,
                                verbose=1,
                                max_queue_size=10,
                                use_multiprocessing=False)
        with self.assertRaises(ValueError):
            model.fit_generator(custom_generator(),
                                steps_per_epoch=5,
                                epochs=1,
                                verbose=1,
                                max_queue_size=10,
                                use_multiprocessing=False,
                                validation_data=invalid_generator(),
                                validation_steps=10)
        with self.assertRaises(ValueError):
            model.predict_generator(invalid_generator(),
                                    steps=5,
                                    max_queue_size=10,
                                    use_multiprocessing=False)
        with self.assertRaises(ValueError):
            model.evaluate_generator(invalid_generator(),
                                     steps=5,
                                     max_queue_size=10,
                                     use_multiprocessing=False)