Example #1
 def test_wide_deep_model_with_single_input(self):
     linear_model = linear.LinearModel(units=1)
     dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
     wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
     inputs = np.random.uniform(low=-5., high=5., size=(64, 3))
     output = .3 * inputs[:, 0]
     wide_deep_model.compile(optimizer=['sgd', 'adam'],
                             loss='mse',
                             metrics=[],
                             run_eagerly=test_utils.should_run_eagerly())
     wide_deep_model.fit(inputs, output, epochs=5)
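Note: these snippets are excerpted from the Keras premade-model test suite, so the module aliases (linear, wide_deep, sequential, core, test_utils, and so on) are not defined inside each example. A plausible import block is sketched below; exact module paths vary across Keras/TensorFlow versions, so treat it as an assumption rather than the canonical header:

    import numpy as np
    import tensorflow.compat.v2 as tf
    from keras import backend, losses
    from keras.engine import input_layer, sequential, training
    from keras.feature_column import dense_features_v2
    from keras.layers import core
    from keras.optimizers.optimizer_v2 import gradient_descent
    from keras.premade_models import linear, wide_deep
    from keras.testing_infra import test_utils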
Example #2
 def test_linear_model_as_layer(self):
     input_a = input_layer.Input(shape=(1,), name="a")
     output_a = linear.LinearModel()(input_a)
     input_b = input_layer.Input(shape=(1,), name="b")
     output_b = core.Dense(units=1)(input_b)
     output = output_a + output_b
     model = training.Model(inputs=[input_a, input_b], outputs=[output])
     input_a_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
     input_b_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
     output_np = 0.3 * input_a_np + 0.2 * input_b_np
     model.compile("sgd", "mse", [])
     model.fit([input_a_np, input_b_np], output_np, epochs=5)
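The same composition also works outside the test harness, since the premade model is exposed publicly. A minimal standalone sketch using tf.keras.experimental.LinearModel (assuming TF 2.x):

    import numpy as np
    import tensorflow as tf

    input_a = tf.keras.Input(shape=(1,), name="a")
    input_b = tf.keras.Input(shape=(1,), name="b")
    # LinearModel is a Model, but it can be called like a layer on symbolic inputs.
    output = tf.keras.experimental.LinearModel()(input_a) + tf.keras.layers.Dense(1)(input_b)
    model = tf.keras.Model([input_a, input_b], output)
    model.compile("sgd", "mse")
    a = np.random.uniform(-5.0, 5.0, size=(64, 1))
    b = np.random.uniform(-5.0, 5.0, size=(64, 1))
    model.fit([a, b], 0.3 * a + 0.2 * b, epochs=2)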
Example #3
 def test_config(self):
     linear_model = linear.LinearModel(units=1)
     dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
     wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
     config = wide_deep_model.get_config()
     cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(config)
     self.assertEqual(linear_model.units,
                      cloned_wide_deep_model.linear_model.units)
     self.assertEqual(
         dnn_model.layers[0].units,
         cloned_wide_deep_model.dnn_model.layers[0].units,
     )
Example #4
 def test_linear_model_with_mismatched_dict_inputs(self):
     model = linear.LinearModel()
     input_a = np.random.uniform(low=-5., high=5., size=(64, 1))
     input_b = np.random.uniform(low=-5., high=5., size=(64, 1))
     output = .3 * input_a + .2 * input_b
     model.compile('sgd', 'mse', [])
     model.build({
         'a': tf.TensorShape([None, 1]),
         'b': tf.TensorShape([None, 1])
     })
     with self.assertRaisesRegex(ValueError, 'Missing keys'):
         model.fit({'c': input_a, 'b': input_b}, output, epochs=5)
Example #5
    def test_config_with_custom_objects(self):
        def my_activation(x):
            return x

        linear_model = linear.LinearModel(units=1)
        dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
        wide_deep_model = wide_deep.WideDeepModel(linear_model,
                                                  dnn_model,
                                                  activation=my_activation)
        config = wide_deep_model.get_config()
        cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(
            config, custom_objects={"my_activation": my_activation})
        self.assertEqual(cloned_wide_deep_model.activation, my_activation)
Example #6
 def test_linear_model_with_sparse_input(self):
     indices = tf.constant([[0, 0], [0, 2], [1, 0], [1, 1]], dtype=tf.int64)
     values = tf.constant([0.4, 0.6, 0.8, 0.5])
     shape = tf.constant([2, 3], dtype=tf.int64)
     model = linear.LinearModel()
     inp = tf.SparseTensor(indices, values, shape)
     output = model(inp)
     self.evaluate(tf.compat.v1.global_variables_initializer())
     if tf.executing_eagerly():
         weights = model.get_weights()
         weights[0] = np.ones((3, 1))
         model.set_weights(weights)
         output = model(inp)
         self.assertAllClose([[1.0], [1.3]], self.evaluate(output))
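The expected values follow directly from the dense equivalent of the sparse input; a quick NumPy check of the arithmetic:

    import numpy as np

    dense = np.array([[0.4, 0.0, 0.6],    # tf.sparse.to_dense(inp), row 0
                      [0.8, 0.5, 0.0]])   # row 1
    kernel = np.ones((3, 1))              # the weights set in the test
    print(dense @ kernel)                 # [[1.0], [1.3]], matching the assertion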
Example #7
 def test_wide_deep_model(self):
     linear_model = linear.LinearModel(units=1)
     dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
     wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
     linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
     dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
     inputs = [linear_inp, dnn_inp]
     output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
     wide_deep_model.compile(optimizer=['sgd', 'adam'],
                             loss='mse',
                             metrics=[],
                             run_eagerly=test_utils.should_run_eagerly())
     wide_deep_model.fit(inputs, output, epochs=5)
     self.assertTrue(wide_deep_model.built)
Example #8
    def test_linear_model_with_sparse_input_and_custom_training(self):
        batch_size = 64
        indices = []
        values = []
        target = np.zeros((batch_size, 1))
        for i in range(64):
            rand_int = np.random.randint(3)
            if rand_int == 0:
                indices.append((i, 0))
                val = np.random.uniform(low=-5.0, high=5.0)
                values.append(val)
                target[i] = 0.3 * val
            elif rand_int == 1:
                indices.append((i, 1))
                val = np.random.uniform(low=-5.0, high=5.0)
                values.append(val)
                target[i] = 0.2 * val
            else:
                indices.append((i, 0))
                indices.append((i, 1))
                val_1 = np.random.uniform(low=-5.0, high=5.0)
                val_2 = np.random.uniform(low=-5.0, high=5.0)
                values.append(val_1)
                values.append(val_2)
                target[i] = 0.3 * val_1 + 0.2 * val_2

        indices = np.asarray(indices)
        values = np.asarray(values)
        shape = tf.constant([batch_size, 2], dtype=tf.int64)
        inp = tf.SparseTensor(indices, values, shape)
        model = linear.LinearModel(use_bias=False)
        opt = gradient_descent.SGD()
        for _ in range(20):
            with tf.GradientTape() as t:
                output = model(inp)
                loss = backend.mean(losses.mean_squared_error(target, output))
            grads = t.gradient(loss, model.trainable_variables)
            grads_and_vars = zip(grads, model.trainable_variables)
            opt.apply_gradients(grads_and_vars)
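The manual GradientTape loop above is the custom-training path the test exercises. The compiled equivalent would be roughly the sketch below; Keras fit generally accepts tf.SparseTensor inputs, though version-specific behavior may differ:

    model = linear.LinearModel(use_bias=False)
    model.compile(optimizer=gradient_descent.SGD(), loss='mse')
    model.fit(inp, target, batch_size=batch_size, epochs=20)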
Example #9
 def test_linear_model_with_feature_column(self):
     vocab_list = ['alpha', 'beta', 'gamma']
     vocab_val = [0.4, 0.6, 0.9]
     data = np.random.choice(vocab_list, size=256)
     y = np.zeros_like(data, dtype=np.float32)
     for vocab, val in zip(vocab_list, vocab_val):
         indices = np.where(data == vocab)
         y[indices] = val + np.random.uniform(
             low=-0.01, high=0.01, size=indices[0].shape)
     cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
         key='symbol', vocabulary_list=vocab_list)
     ind_column = tf.feature_column.indicator_column(cat_column)
     dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
     linear_model = linear.LinearModel(use_bias=False,
                                       kernel_initializer='zeros')
     combined = sequential.Sequential([dense_feature_layer, linear_model])
     opt = gradient_descent.SGD(learning_rate=0.1)
     combined.compile(opt, 'mse', [])
     combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
     self.assertAllClose([[0.4], [0.6], [0.9]],
                         combined.layers[1].dense_layers[0].kernel.numpy(),
                         atol=0.01)
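The kernel converges to the vocab values because the indicator column one-hot encodes each symbol, so each kernel row is fit against a single target value. Illustratively:

    import numpy as np

    one_hot = {'alpha': [1, 0, 0], 'beta': [0, 1, 0], 'gamma': [0, 0, 1]}
    kernel = np.array([[0.4], [0.6], [0.9]])     # the converged weights
    print(np.array(one_hot['beta']) @ kernel)    # ~[0.6], the target for 'beta'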
Example #10
 def test_wide_deep_model_as_layer(self):
     linear_model = linear.LinearModel(units=1)
     dnn_model = sequential.Sequential([core.Dense(units=1)])
     linear_input = input_layer.Input(shape=(3, ), name='linear')
     dnn_input = input_layer.Input(shape=(5, ), name='dnn')
     wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
     wide_deep_output = wide_deep_model((linear_input, dnn_input))
     input_b = input_layer.Input(shape=(1, ), name='b')
     output_b = core.Dense(units=1)(input_b)
     model = training.Model(inputs=[linear_input, dnn_input, input_b],
                            outputs=[wide_deep_output + output_b])
     linear_input_np = np.random.uniform(low=-5., high=5., size=(64, 3))
     dnn_input_np = np.random.uniform(low=-5., high=5., size=(64, 5))
     input_b_np = np.random.uniform(low=-5., high=5., size=(64, ))
     output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np
     model.compile(optimizer='sgd',
                   loss='mse',
                   metrics=[],
                   run_eagerly=test_utils.should_run_eagerly())
     model.fit([linear_input_np, dnn_input_np, input_b_np],
               output_np,
               epochs=5)
Example #11
    def test_wide_deep_model_with_multi_outputs(self):
        inp = input_layer.Input(shape=(1, ), name="linear")
        l = linear.LinearModel(units=2, use_bias=False)(inp)
        l1, l2 = tf.split(l, num_or_size_splits=2, axis=1)
        linear_model = training.Model(inp, [l1, l2])
        linear_model.set_weights([np.asarray([[0.5, 0.3]])])
        h = core.Dense(units=2, use_bias=False)(inp)
        h1, h2 = tf.split(h, num_or_size_splits=2, axis=1)
        dnn_model = training.Model(inp, [h1, h2])
        dnn_model.set_weights([np.asarray([[0.1, -0.5]])])
        wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
        inp_np = np.asarray([[1.0]])
        out1, out2 = wide_deep_model(inp_np)
        # output should be (0.5 + 0.1), and (0.3 - 0.5)
        self.assertAllClose([[0.6]], out1)
        self.assertAllClose([[-0.2]], out2)

        wide_deep_model = wide_deep.WideDeepModel(linear_model,
                                                  dnn_model,
                                                  activation="relu")
        out1, out2 = wide_deep_model(inp_np)
        # output should be relu((0.5 + 0.1)), and relu((0.3 - 0.5))
        self.assertAllClose([[0.6]], out1)
        self.assertAllClose([[0.0]], out2)
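As the assertions above show, WideDeepModel sums the two branch outputs and then applies the optional activation. A simplified sketch of that combination step (not the exact implementation):

    import tensorflow as tf

    def combine(linear_out, dnn_out, activation=None):
        # Simplified view of how WideDeepModel merges its branches.
        out = linear_out + dnn_out
        return activation(out) if activation is not None else out

    print(combine(tf.constant([[0.5]]), tf.constant([[0.1]])))               # [[0.6]]
    print(combine(tf.constant([[0.3]]), tf.constant([[-0.5]]), tf.nn.relu))  # [[0.0]]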
Example #12
 def test_wide_deep_model_with_single_feature_column(self):
     vocab_list = ["alpha", "beta", "gamma"]
     vocab_val = [0.4, 0.6, 0.9]
     data = np.random.choice(vocab_list, size=256)
     y = np.zeros_like(data, dtype=np.float32)
     for vocab, val in zip(vocab_list, vocab_val):
         indices = np.where(data == vocab)
         y[indices] = val + np.random.uniform(
             low=-0.01, high=0.01, size=indices[0].shape)
     cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
         key="symbol", vocabulary_list=vocab_list)
     ind_column = tf.feature_column.indicator_column(cat_column)
     dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
     linear_model = linear.LinearModel(use_bias=False,
                                       kernel_initializer="zeros")
     dnn_model = sequential.Sequential([core.Dense(units=1)])
     wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
     combined = sequential.Sequential(
         [dense_feature_layer, wide_deep_model])
     opt = gradient_descent.SGD(learning_rate=0.1)
     combined.compile(opt,
                      "mse", [],
                      run_eagerly=test_utils.should_run_eagerly())
     combined.fit(x={"symbol": data}, y=y, batch_size=32, epochs=10)
Example #13
 def test_config(self):
     linear_model = linear.LinearModel(units=3, use_bias=True)
     config = linear_model.get_config()
     cloned_linear_model = linear.LinearModel.from_config(config)
     self.assertEqual(linear_model.units, cloned_linear_model.units)