Example #1
def test_subclassed_model_with_feature_columns(self):
        col_a = fc.numeric_column('a')
        col_b = fc.numeric_column('b')

        dnn_model = TestDNNModel([col_a, col_b], 20)

        dnn_model.compile(
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'],
            run_eagerly=testing_utils.should_run_eagerly(),
            run_distributed=testing_utils.should_run_distributed())

        x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))}
        y = np.random.randint(20, size=(10, 1))
        y = keras.utils.to_categorical(y, num_classes=20)
        dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
        dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
        dnn_model.evaluate(x=x, y=y, batch_size=5)
        dnn_model.predict(x=x, batch_size=5)
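
This example (and Example #22 further down) relies on a TestDNNModel helper defined elsewhere in the test module. A purely hypothetical sketch of such a subclassed model, assuming it just stacks a DenseFeatures layer over a Dense head:

from tensorflow import keras

class TestDNNModel(keras.models.Model):
    """Hypothetical sketch of the helper assumed above, not the test file's own definition."""

    def __init__(self, feature_columns, units, name=None, **kwargs):
        super(TestDNNModel, self).__init__(name=name, **kwargs)
        # DenseFeatures turns the {'a': ..., 'b': ...} feature dict into one dense tensor.
        self._input_layer = keras.layers.DenseFeatures(feature_columns)
        self._dense_layer = keras.layers.Dense(units, activation='softmax')

    def call(self, features):
        net = self._input_layer(features)
        return self._dense_layer(net)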
Example #2
 def test_loss_correctness(self):
     # Test that training loss is the same in eager and graph
     # (by comparing it to a reference value in a deterministic case)
     layers = [
         keras.layers.Dense(3, activation='relu',
                            kernel_initializer='ones'),
         keras.layers.Dense(2,
                            activation='softmax',
                            kernel_initializer='ones')
     ]
     model = testing_utils.get_model_from_layers(layers, input_shape=(4, ))
     model.compile(loss='sparse_categorical_crossentropy',
                   optimizer=rmsprop.RMSprop(learning_rate=0.001),
                   run_eagerly=testing_utils.should_run_eagerly(),
                   run_distributed=testing_utils.should_run_distributed())
     x = np.ones((100, 4))
     np.random.seed(123)
     y = np.random.randint(0, 1, size=(100, 1))
     history = model.fit(x, y, epochs=1, batch_size=10)
     self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4)
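
testing_utils.get_model_from_layers, used throughout these examples, is a Keras test helper that wires a list of layers into a model of whatever topology (sequential, functional, or subclassed) the harness is currently exercising. A simplified stand-in that always builds a Sequential model, for readers who want to run the snippets outside the test suite:

from tensorflow import keras

def get_model_from_layers(layer_list, input_shape=None):
    # Simplified stand-in for testing_utils.get_model_from_layers: the real
    # helper can also build functional and subclassed variants.
    model = keras.Sequential()
    if input_shape is not None:
        model.add(keras.layers.InputLayer(input_shape=input_shape))
    for layer in layer_list:
        model.add(layer)
    return model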
Example #3
    def test_sparse_tensor_outputs(self):
        # Create a model that accepts a dense input and converts it to a
        # sparse tensor, dropping the zero padding values.
        layers = [ToSparse()]
        model = testing_utils.get_model_from_layers(layers,
                                                    input_shape=(None, ))
        model._run_distributed = testing_utils.should_run_distributed()
        model._run_eagerly = testing_utils.should_run_eagerly()

        # Define some input data with additional padding.
        input_data = np.array([[1, 0, 0], [2, 3, 0]])
        output = model.predict(input_data)

        expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
        expected_values = np.array([1, 2, 3])
        expected_dense_shape = np.array([2, 3])

        self.assertAllEqual(output.indices, expected_indices)
        self.assertAllEqual(output.values, expected_values)
        self.assertAllEqual(output.dense_shape, expected_dense_shape)
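
ToSparse is a small helper layer defined in the test module. A sketch that matches the expected indices/values/dense_shape in the assertions above (zero entries are treated as padding and dropped):

import tensorflow as tf
from tensorflow import keras

class ToSparse(keras.layers.Layer):
    """Sketch: converts a dense tensor to a SparseTensor, dropping zero entries."""

    def call(self, inputs):
        indices = tf.where(tf.not_equal(inputs, 0))        # positions of non-zero values
        values = tf.gather_nd(inputs, indices)             # the non-zero values themselves
        dense_shape = tf.shape(inputs, out_type=tf.int64)  # original dense shape
        return tf.SparseTensor(indices, values, dense_shape)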
Example #4
    def test_defun_on_call(self):
        # Check that one can subclass Sequential and place the `call` in a `defun`.

        class MySequential(keras.Sequential):
            def __init__(self, name=None):
                super(MySequential, self).__init__(name=name)
                self.call = function.defun(self.call)

        model = MySequential()
        model.add(keras.layers.Dense(4, activation='relu'))
        model.add(keras.layers.Dense(5, activation='softmax'))

        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example #5
  def test_optimizer_dependency(self):
    model = _get_model()
    opt = adam.AdamOptimizer(.01)
    model.compile(
        optimizer=opt,
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        run_distributed=testing_utils.should_run_distributed())

    model.fit(
        x=np.array([[1., 2., 3., 4.]]),
        y=np.array([[1., 1., 1., 1.]]),
        epochs=2)
    save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
    beta1_power, _ = opt._get_beta_accumulators()
    self.evaluate(beta1_power.assign(12.))
    model.save_weights(save_prefix)
    self.evaluate(beta1_power.assign(13.))
    model.load_weights(save_prefix)
    self.assertEqual(12., self.evaluate(beta1_power))
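
_get_model() is a module-level helper that is not shown in this listing; any small model whose input and output widths match the (1, 4) arrays passed to fit would work here. A purely hypothetical stand-in:

from tensorflow import keras

def _get_model():
    # Hypothetical stand-in: 4 inputs, 4 outputs, compatible with the mse loss above.
    return keras.Sequential([keras.layers.Dense(4, input_shape=(4,))])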
Example #6
    def test_regularization_shared_layer_in_different_models(
            self, regularizer):
        shared_dense = keras.layers.Dense(NUM_CLASSES,
                                          kernel_regularizer=regularizer,
                                          activity_regularizer=regularizer)
        models = []
        for _ in range(2):
            input_tensor = keras.layers.Input(shape=(DATA_DIM, ))
            unshared_dense = keras.layers.Dense(NUM_CLASSES,
                                                kernel_regularizer=regularizer)
            out = unshared_dense(shared_dense(input_tensor))
            models.append(keras.models.Model(input_tensor, out))

        model = self.create_multi_input_model_from(layer1=models[0],
                                                   layer2=models[1])
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())
        self.assertEqual(len(model.losses), 14)
Example #7
    def test_further_tuning_post_injection(self):
        """Test that models can be tuned with multiple calls to 'adapt'."""

        input_dataset = np.array([1, 2, 3, 4, 5])

        layer = get_layer()

        input_data = keras.Input(shape=(1, ))
        output = layer(input_data)
        model = keras.Model(input_data, output)
        model._run_eagerly = testing_utils.should_run_eagerly()
        model._run_distributed = testing_utils.should_run_distributed()

        combiner = layer._combiner
        updates = combiner.extract(combiner.compute(input_dataset))
        layer._set_state_variables(updates)
        self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

        layer.adapt(np.array([1, 2]), reset_state=False)
        self.assertAllEqual([[19], [20], [21]], model.predict([1., 2., 3.]))
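
get_layer() returns an adapt()-able preprocessing layer defined in the test module. Judging from the assertions (state from [1..5] makes predict add 15; adapting on [1, 2] without resetting makes it add 18), the layer learns the running sum of the data it has seen. A behavioral sketch that skips the combiner/state-variable machinery the real layer uses:

import numpy as np
import tensorflow as tf
from tensorflow import keras

class AddingStateLayer(keras.layers.Layer):
    """Behavioral sketch: learns the sum of the adapted data and adds it to inputs."""

    def __init__(self, **kwargs):
        super(AddingStateLayer, self).__init__(**kwargs)
        self.total = tf.Variable(0., trainable=False)

    def adapt(self, data, reset_state=True):
        if reset_state:
            self.total.assign(0.)
        self.total.assign_add(float(np.sum(data)))

    def call(self, inputs):
        return tf.cast(inputs, tf.float32) + self.total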
Example #8
  def test_model(self, strategy_fn, use_operator=False, use_regularizer=False,
                 cloning=True):
    if not self._is_strategy_supported(strategy_fn):
      return
    regularizer = IdentityRegularizer() if use_regularizer else None
    with strategy_fn().scope():
      with policy.policy_scope('infer_float32_vars'):
        x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
        layer = AddLayer(assert_type=dtypes.float16, use_operator=use_operator,
                         regularizer=regularizer)
        y = layer(x)
        y = math_ops.cast(y, dtypes.float32)
        model = models.Model(inputs=x, outputs=y)

        def loss_fn(y_true, y_pred):
          del y_true
          return math_ops.reduce_mean(y_pred)

        # Learning rate is small enough that if applied to a float16 variable,
        # the variable will not change. So this checks that the update is
        # applied to the float32 variable rather than to a float16 value.
        opt = gradient_descent.SGD(2 ** -14)
        model.compile(
            opt,
            loss=loss_fn,
            cloning=cloning,
            run_eagerly=testing_utils.should_run_eagerly(),
            run_distributed=testing_utils.should_run_distributed())

    self.assertEqual(backend.eval(layer.v), 1)
    x = np.ones((2, 1))
    y = np.ones((2, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
    model.fit(dataset)
    # Variable starts at 1, and should have gradient of 2 ** -14 subtracted
    # from it.
    expected = 1 - 2 ** -14
    if use_regularizer:
      # Regularizer adds another 2 ** -14 to the gradient.
      expected -= 2 ** -14
    self.assertEqual(backend.eval(layer.v), expected)
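
AddLayer and IdentityRegularizer come from the mixed-precision test utilities. A simplified sketch of both, assuming AddLayer adds a scalar variable (initialized to 1) to its input and optionally checks the input dtype, and IdentityRegularizer returns the variable itself as the regularization loss (hence the extra 2 ** -14 in the expectation above):

import tensorflow as tf
from tensorflow import keras

class IdentityRegularizer(keras.regularizers.Regularizer):
    """Sketch: the regularization loss is the variable itself, so it contributes a gradient of 1."""

    def __call__(self, x):
        return tf.identity(x)

class AddLayer(keras.layers.Layer):
    """Sketch: adds a scalar variable v (initialized to 1) to its input."""

    def __init__(self, assert_type=None, use_operator=False, regularizer=None, **kwargs):
        super(AddLayer, self).__init__(**kwargs)
        self._assert_type = assert_type
        self._use_operator = use_operator
        self._regularizer = regularizer

    def build(self, _):
        self.v = self.add_weight('v', shape=(), initializer='ones',
                                 regularizer=self._regularizer)

    def call(self, inputs):
        if self._assert_type is not None:
            assert inputs.dtype == self._assert_type
        # use_operator switches between the + operator and the explicit add op.
        return inputs + self.v if self._use_operator else tf.add(inputs, self.v)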
Example #9
  def test_fixed_loss_scaling(self, strategy_fn, cloning=True):
    # Note: We do not test mixed precision in this method, only loss scaling.
    if not self._is_strategy_supported(strategy_fn):
      return
    loss_scale = 8.
    batch_size = 4
    with strategy_fn().scope():
      x = layers.Input(shape=(1,), batch_size=batch_size)
      layer = AddLayer()
      y = layer(x)

      # The gradient of 'y' at this point is 1. With loss scaling, the gradient
      # is 'loss_scale'. We divide by the batch size since the loss is averaged
      # across batch elements.
      expected_gradient = loss_scale / batch_size
      identity_with_grad_check_fn = (
          mp_test_util.create_identity_with_grad_check_fn([expected_gradient]))
      y = core.Lambda(identity_with_grad_check_fn)(y)
      model = models.Model(inputs=x, outputs=y)

      def loss_fn(y_true, y_pred):
        del y_true
        return math_ops.reduce_mean(y_pred)

      opt = gradient_descent.SGD(1.)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
      model.compile(
          opt,
          loss=loss_fn,
          cloning=cloning,
          run_eagerly=testing_utils.should_run_eagerly(),
          run_distributed=testing_utils.should_run_distributed())

    self.assertEqual(backend.eval(layer.v), 1)
    x = np.ones((batch_size, 1))
    y = np.ones((batch_size, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
    model.fit(dataset)
    # Variable starts at 1, and should have gradient of 1 subtracted from it.
    expected = 0
    self.assertEqual(backend.eval(layer.v), expected)
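
mp_test_util.create_identity_with_grad_check_fn builds an identity function whose backward pass verifies the gradient flowing through it. Roughly, under the assumption that it is implemented with a custom gradient:

import tensorflow as tf

def create_identity_with_grad_check_fn(expected_gradient):
    """Sketch: identity on the forward pass; asserts the incoming gradient on the backward pass."""

    @tf.custom_gradient
    def identity_with_grad_check(x):
        def grad(dx):
            expected = tf.convert_to_tensor(expected_gradient, dtype=dx.dtype)
            assert_op = tf.debugging.assert_equal(dx, expected)
            with tf.control_dependencies([assert_op]):
                return tf.identity(dx)
        return tf.identity(x), grad

    return identity_with_grad_check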
Example #10
    def test_layernorm_convnet_channel_last(self):
        model = keras.models.Sequential()
        norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
        model.add(norm)
        model.compile(
            loss='mse',
            optimizer=gradient_descent.GradientDescentOptimizer(0.01),
            run_eagerly=testing_utils.should_run_eagerly(),
            run_distributed=testing_utils.should_run_distributed())

        # centered on 5.0, standard deviation 10.0
        x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
        model.fit(x, x, epochs=4, verbose=0)
        out = model.predict(x)
        out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
        out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))

        np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)),
                                   0.0,
                                   atol=1e-1)
        np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
Example #11
    def test_generator_methods_invalid_use_case(self):
        def invalid_generator():
            while 1:
                yield (0, 0, 0, 0)

        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        err_msg = 'Output of generator should be a tuple of 1 or 2 or 3 elements'
        with self.assertRaisesRegex(ValueError, err_msg):
            model.fit_generator(invalid_generator(),
                                steps_per_epoch=5,
                                epochs=1,
                                verbose=1,
                                max_queue_size=10,
                                use_multiprocessing=False)
        with self.assertRaisesRegex(ValueError, err_msg):
            model.fit_generator(custom_generator(),
                                steps_per_epoch=5,
                                epochs=1,
                                verbose=1,
                                max_queue_size=10,
                                use_multiprocessing=False,
                                validation_data=invalid_generator(),
                                validation_steps=10)
        with self.assertRaisesRegex(ValueError, err_msg):
            model.predict_generator(invalid_generator(),
                                    steps=5,
                                    max_queue_size=10,
                                    use_multiprocessing=False)
        with self.assertRaisesRegex(ValueError, err_msg):
            model.evaluate_generator(invalid_generator(),
                                     steps=5,
                                     max_queue_size=10,
                                     use_multiprocessing=False)
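
custom_generator is a valid generator defined elsewhere in the test module and is used here as the well-formed counterpart to invalid_generator. A hypothetical stand-in shaped for the 2-feature, 4-class MLP above:

import numpy as np

def custom_generator(batch_size=10):
    # Hypothetical stand-in: yields (inputs, targets) batches forever.
    while True:
        x = np.random.random((batch_size, 2))
        y = np.random.random((batch_size, 4))
        yield x, y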
Example #12
  def test_vector_classification_shared_model(self):
    # Test that Sequential models that feature internal updates
    # and internal losses can be shared.
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)

    base_model = testing_utils.get_model_from_layers(
        [keras.layers.Dense(16,
                            activation='relu',
                            kernel_regularizer=keras.regularizers.l2(1e-5),
                            bias_regularizer=keras.regularizers.l2(1e-5)),
         keras.layers.BatchNormalization()],
        input_shape=x_train.shape[1:])
    x = keras.layers.Input(x_train.shape[1:])
    y = base_model(x)
    y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
    model = keras.models.Model(x, y)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly(),
        run_distributed=testing_utils.should_run_distributed())
    if not testing_utils.should_run_eagerly():
      self.assertEqual(len(model.get_losses_for(None)), 2)
      self.assertEqual(len(model.get_updates_for(x)), 2)
    history = model.fit(x_train, y_train, epochs=10, batch_size=10,
                        validation_data=(x_train, y_train),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(x_train, y_train)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(x_train)
    self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #13
    def test_generator_input_to_fit_eval_predict(self):
        val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

        def ones_generator():
            while True:
                yield np.ones([10, 10], np.float32), np.ones([10, 1],
                                                             np.float32)

        model = testing_utils.get_small_mlp(num_hidden=10,
                                            num_classes=1,
                                            input_dim=10)

        model.compile(rmsprop.RMSprop(0.001),
                      'binary_crossentropy',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())
        model.fit(ones_generator(),
                  steps_per_epoch=2,
                  validation_data=val_data,
                  epochs=2)
        model.evaluate(ones_generator(), steps=2)
        model.predict(ones_generator(), steps=2)
Example #14
    def test_stateful_LSTM_training(self):
        # See b/123587692 for more context.
        vocab_size = 20
        embedding_dim = 10
        batch_size = 8
        timestep = 12
        units = 5
        x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
        y = np.random.randint(0, vocab_size, size=(batch_size, timestep))

        model = keras.Sequential([
            keras.layers.Embedding(vocab_size,
                                   embedding_dim,
                                   batch_input_shape=[batch_size, timestep]),
            rnn.LSTM(units, return_sequences=True, stateful=True),
            keras.layers.Dense(vocab_size)
        ])
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())
        model.fit(x, y, epochs=1, shuffle=False)
Example #15
  def test_batchnorm_convnet(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True):
        model = keras.models.Sequential()
        norm = keras.layers.BatchNormalization(
            axis=1, input_shape=(3, 4, 4), momentum=0.8)
        model.add(norm)
        model.compile(
            loss='mse',
            optimizer=gradient_descent.GradientDescentOptimizer(0.01),
            run_eagerly=testing_utils.should_run_eagerly(),
            run_distributed=testing_utils.should_run_distributed())

        # centered on 5.0, standard deviation 10.0
        x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
        model.fit(x, x, epochs=4, verbose=0)
        out = model.predict(x)
        out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
        out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))

        np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
        np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
Example #16
    def test_dataset_with_sample_weights(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        sample_weights = np.ones((10), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices(
            (inputs, targets, sample_weights))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)
Example #17
    def test_clone_functional_with_masking(self, share_weights):
        if share_weights:
            clone_fn = functools.partial(keras.models._clone_functional_model,
                                         layer_fn=models.share_weights)
        else:
            clone_fn = keras.models.clone_model

        x = np.array([[[1.], [1.]], [[0.], [0.]]])
        inputs = keras.Input((2, 1))
        outputs = keras.layers.Masking(mask_value=0)(inputs)
        outputs = keras.layers.TimeDistributed(
            keras.layers.Dense(1, kernel_initializer='one'))(outputs)
        model = keras.Model(inputs, outputs)

        model = clone_fn(model)
        model.compile(loss='mse',
                      optimizer=testing_utils.get_v2_optimizer('adam'),
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())
        y = np.array([[[1], [1]], [[1], [1]]])
        loss = model.train_on_batch(x, y)
        self.assertEqual(float(loss), 0.)
Example #18
    def test_iterators_running_out_of_data(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae']
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(2)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        with test.mock.patch.object(logging, 'warning') as mock_log:
            model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
            self.assertRegexpMatches(str(mock_log.call_args),
                                     'dataset iterator ran out of data')
Example #19
    def test_metrics_correctness_with_iterator(self):
        layers = [
            keras.layers.Dense(8,
                               activation='relu',
                               input_dim=4,
                               kernel_initializer='ones'),
            keras.layers.Dense(1,
                               activation='sigmoid',
                               kernel_initializer='ones')
        ]

        model = testing_utils.get_model_from_layers(layers, (4, ))

        model.compile(loss='binary_crossentropy',
                      metrics=['accuracy',
                               metrics_module.BinaryAccuracy()],
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        np.random.seed(123)
        x = np.random.randint(10, size=(100, 4)).astype(np.float32)
        y = np.random.randint(2, size=(100, 1)).astype(np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        outs = model.evaluate(iterator, steps=10)
        self.assertEqual(np.around(outs[1], decimals=1), 0.5)
        self.assertEqual(np.around(outs[2], decimals=1), 0.5)

        y = np.zeros((100, 1), dtype=np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        outs = model.evaluate(iterator, steps=10)
        self.assertEqual(outs[1], 0.)
        self.assertEqual(outs[2], 0.)
Example #20
    def test_finite_dataset_known_cardinality_no_steps_arg(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = np.zeros((100, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=100, dtype=np.int32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.batch(10)

        batch_counter = BatchCounterCallback()
        history = model.fit(dataset,
                            epochs=2,
                            verbose=1,
                            callbacks=[batch_counter])

        self.assertLen(history.history['loss'], 2)
        self.assertEqual(batch_counter.batch_count, 20)
        model.evaluate(dataset)
        out = model.predict(dataset)
        self.assertEqual(out.shape[0], 100)
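
BatchCounterCallback (used here and in Example #26) is a tiny callback defined in the test module; a sketch consistent with how it is used:

from tensorflow import keras

class BatchCounterCallback(keras.callbacks.Callback):
    """Sketch: counts how many training batches were run."""

    def __init__(self):
        super(BatchCounterCallback, self).__init__()
        self.batch_count = 0

    def on_batch_end(self, batch, logs=None):
        self.batch_count += 1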
Example #21
    def test_raw_variable_assignment(self):
        class RawVariableLayer(keras.layers.Layer):
            def __init__(self, **kwargs):
                super(RawVariableLayer, self).__init__(**kwargs)
                # Test variables in nested structure.
                self.var_list = [
                    variables.Variable(1.), {
                        'a': variables.Variable(2.)
                    }
                ]

            def call(self, inputs):
                return inputs * self.var_list[0] * self.var_list[1]['a']

        model = testing_utils.get_model_from_layers([RawVariableLayer()],
                                                    input_shape=(10, ))
        model.compile('sgd',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())
        x, y = np.ones((10, 10)), np.ones((10, 10))
        # Checks that variables get initialized.
        model.fit(x, y, batch_size=2, epochs=2)
Example #22
    def test_subclassed_model_with_feature_columns_with_ds_input(self):
        col_a = fc.numeric_column('a')
        col_b = fc.numeric_column('b')

        dnn_model = TestDNNModel([col_a, col_b], 20)

        dnn_model.compile(
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'],
            run_eagerly=testing_utils.should_run_eagerly(),
            run_distributed=testing_utils.should_run_distributed())

        y = np.random.randint(20, size=(100, 1))
        y = keras.utils.to_categorical(y, num_classes=20)
        x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))}
        ds1 = dataset_ops.Dataset.from_tensor_slices(x)
        ds2 = dataset_ops.Dataset.from_tensor_slices(y)
        ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
        dnn_model.fit(ds, steps_per_epoch=1)
        dnn_model.fit(ds, steps_per_epoch=1)
        dnn_model.evaluate(ds, steps=1)
        dnn_model.predict(ds, steps=1)
Example #23
    def test_sequential_model_with_ds_input(self):
        columns = [fc.numeric_column('a')]
        model = keras.models.Sequential([
            fc.DenseFeatures(columns),
            keras.layers.Dense(64, activation='relu'),
            keras.layers.Dense(20, activation='softmax')
        ])
        model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'],
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        y = np.random.randint(20, size=(100, 1))
        y = keras.utils.to_categorical(y, num_classes=20)
        x = {'a': np.random.random((100, 1))}
        ds1 = dataset_ops.Dataset.from_tensor_slices(x)
        ds2 = dataset_ops.Dataset.from_tensor_slices(y)
        ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
        model.fit(ds, steps_per_epoch=1)
        model.fit(ds, steps_per_epoch=1)
        model.evaluate(ds, steps=1)
        model.predict(ds, steps=1)
Example #24
 def test_wide_deep_model_as_layer(self):
   linear_model = linear.LinearModel(units=1)
   dnn_model = sequential.Sequential([core.Dense(units=1)])
   linear_input = input_layer.Input(shape=(3,), name='linear')
   dnn_input = input_layer.Input(shape=(5,), name='dnn')
   wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
   wide_deep_output = wide_deep_model((linear_input, dnn_input))
   input_b = input_layer.Input(shape=(1,), name='b')
   output_b = core.Dense(units=1)(input_b)
   model = training.Model(
       inputs=[linear_input, dnn_input, input_b],
       outputs=[wide_deep_output + output_b])
   linear_input_np = np.random.uniform(low=-5, high=5, size=(64, 3))
   dnn_input_np = np.random.uniform(low=-5, high=5, size=(64, 5))
   input_b_np = np.random.uniform(low=-5, high=5, size=(64,))
   output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np
   model.compile(
       optimizer='sgd',
       loss='mse',
       metrics=[],
       run_eagerly=testing_utils.should_run_eagerly(),
       run_distributed=testing_utils.should_run_distributed())
   model.fit([linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5)
Example #25
    def test_model_methods_with_eager_tensors_single_io(self):
        if not context.executing_eagerly():
            # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
            # symbolic tensors has different requirements.
            return

        model = testing_utils.get_small_mlp(10, 4, 3)

        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = array_ops.zeros(shape=(10, 3))
        targets = array_ops.zeros(shape=(10, 4))

        model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=3,
                  verbose=0,
                  shuffle=False)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=4,
                  verbose=0,
                  validation_data=(inputs, targets))
        model.evaluate(inputs, targets, batch_size=2, verbose=0)
        model.predict(inputs, batch_size=2)
        model.train_on_batch(inputs, targets)
        model.test_on_batch(inputs, targets)
Example #26
    def test_finite_dataset_unknown_cardinality_out_of_data(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = np.zeros((100, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=100, dtype=np.int32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.filter(lambda x, y: True).batch(10)
        self.assertEqual(
            keras.backend.get_value(cardinality.cardinality(dataset)),
            cardinality.UNKNOWN)

        batch_counter = BatchCounterCallback()
        with test.mock.patch.object(logging, 'warning') as mock_log:
            # steps_per_epoch (200) exceeds the number of batches the dataset can
            # produce (10), so training stops early and never reaches the second epoch.
            history = model.fit(dataset,
                                epochs=2,
                                verbose=1,
                                callbacks=[batch_counter],
                                steps_per_epoch=200)
            self.assertIn('ran out of data; interrupting training.',
                          str(mock_log.call_args))
            self.assertIn(
                'can generate at least '
                '`steps_per_epoch * epochs` batches (in this case, 400 batches). '
                'You may need to use the repeat() function when '
                'building your dataset.', str(mock_log.call_args))

        self.assertLen(history.history['loss'], 1)
        self.assertEqual(batch_counter.batch_count, 10)
        model.evaluate(dataset)
        out = model.predict(dataset)
        self.assertEqual(out.shape[0], 100)
Example #27
    def test_sparse_scipy_eval_input_dicts(self):
        # Create a model that accepts a sparse input and converts the sparse tensor
        # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use
        # a one-dimensional shape; note also that scipy's default dtype is int64.
        if testing_utils.get_model_type() == "subclass":
            input_name = "input_1"  # Subclass models don"t support input names.
        else:
            input_name = "test_input_name"
        model_input = input_layer.Input(shape=(3, ),
                                        sparse=True,
                                        name=input_name,
                                        dtype=dtypes.int64)
        layers = [ToDense(default_value=-1)]
        model = get_model_from_layers_with_input(layers,
                                                 model_input=model_input)
        model.compile(optimizer="sgd",
                      loss="mse",
                      metrics=["accuracy"],
                      run_distributed=testing_utils.should_run_distributed())

        input_data = {
            input_name:
            scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])),
                                    shape=[2, 3])
        }
        expected_output = np.array([[1, -1, -1], [2, 3, -1]])
        output = model.evaluate(input_data, expected_output, steps=1)
        self.assertAllEqual(1.0, output[-1])

        input_data_2 = {
            input_name:
            scipy.sparse.coo_matrix(
                ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3])
        }
        expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]])
        output_2 = model.evaluate(input_data_2, expected_output_2, steps=1)
        self.assertAllEqual(1.0, output_2[-1])
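
ToDense and get_model_from_layers_with_input are local helpers in the sparse/ragged test module. A sketch of a ToDense layer consistent with the expected outputs above, assuming it densifies sparse (or ragged) inputs with the given default value:

import tensorflow as tf
from tensorflow import keras

class ToDense(keras.layers.Layer):
    """Sketch: converts sparse or ragged input to a dense tensor, padding with default_value."""

    def __init__(self, default_value, **kwargs):
        super(ToDense, self).__init__(**kwargs)
        self._default_value = default_value

    def call(self, inputs):
        if isinstance(inputs, tf.SparseTensor):
            return tf.sparse.to_dense(inputs, default_value=self._default_value)
        if isinstance(inputs, tf.RaggedTensor):
            return inputs.to_tensor(default_value=self._default_value)
        return inputs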
Example #28
    def test_extract_model_metrics(self):
        a = keras.layers.Input(shape=(3, ), name='input_a')
        b = keras.layers.Input(shape=(3, ), name='input_b')

        dense = keras.layers.Dense(4, name='dense')
        c = dense(a)
        d = dense(b)
        e = keras.layers.Dropout(0.5, name='dropout')(c)

        model = keras.models.Model([a, b], [d, e])
        extract_metrics = saving_utils.extract_model_metrics(model)
        self.assertEqual(None, extract_metrics)

        extract_metric_names = [
            'dense_binary_accuracy', 'dropout_binary_accuracy',
            'dense_mean_squared_error', 'dropout_mean_squared_error'
        ]
        if tf2.enabled():
            extract_metric_names.extend(['dense_mae', 'dropout_mae'])
        else:
            extract_metric_names.extend(
                ['dense_mean_absolute_error', 'dropout_mean_absolute_error'])

        model_metric_names = ['loss', 'dense_loss', 'dropout_loss'
                              ] + extract_metric_names
        model.compile(loss='mae',
                      metrics=[
                          keras.metrics.BinaryAccuracy(), 'mae',
                          keras.metrics.mean_squared_error
                      ],
                      optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01),
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())
        extract_metrics = saving_utils.extract_model_metrics(model)
        self.assertEqual(set(model_metric_names), set(model.metrics_names))
        self.assertEqual(set(extract_metric_names),
                         set(extract_metrics.keys()))
Example #29
    def test_sequential_deferred_build_with_np_arrays(self):
        num_hidden = 5
        input_dim = 3
        batch_size = 5
        num_classes = 2

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      metrics=[keras.metrics.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())
        self.assertEqual(len(model.layers), 2)
        with self.assertRaisesRegexp(
                ValueError, 'Weights for model .* have not yet been created'):
            len(model.weights)
        self.assertFalse(model.built)

        x = np.random.random((batch_size, input_dim))
        y = np.random.random((batch_size, num_classes))
        model.fit(x, y, epochs=1)
        self.assertTrue(model.built)
        self.assertFalse(model._is_graph_network)
        self.assertEqual(len(model.weights), 2 * 2)
Example #30
 def test_loss_correctness_with_iterator(self):
     # Test that training loss is the same in eager and graph
     # (by comparing it to a reference value in a deterministic case)
     layers = [
         keras.layers.Dense(3, activation='relu',
                            kernel_initializer='ones'),
         keras.layers.Dense(2,
                            activation='softmax',
                            kernel_initializer='ones')
     ]
     model = testing_utils.get_model_from_layers(layers, input_shape=(4, ))
     model.compile(loss='sparse_categorical_crossentropy',
                   optimizer=rmsprop.RMSprop(learning_rate=0.001),
                   run_eagerly=testing_utils.should_run_eagerly(),
                   run_distributed=testing_utils.should_run_distributed())
     x = np.ones((100, 4), dtype=np.float32)
     np.random.seed(123)
     y = np.random.randint(0, 1, size=(100, 1))
     dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
     dataset = dataset.repeat(100)
     dataset = dataset.batch(10)
     iterator = dataset_ops.make_one_shot_iterator(dataset)
     history = model.fit(iterator, epochs=1, steps_per_epoch=10)
     self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4)