Example 1
0
 def test_single_feature_lookup_1D(self, sigma_dimension):
     """Looks up a single feature from a 1-D input batch.

     With a non-zero sigma only the output shape is deterministic; with
     sigma == 0 the exact (all-ones) values can be checked as well.
     """
     dim = 5 + sigma_dimension
     de_config = test_util.default_de_config(dim, [1] * dim)
     embedder = sparse_features.SparseFeatureEmbedding(
         de_config, {'fea': (5, sigma_dimension)},
         op_name='single_feature_%d' % sigma_dimension,
         service_address=self._kbs_address)
     embed, _, _, embed_map = embedder.lookup(['input1', 'input2'])
     if sigma_dimension <= 0:
         # Deterministic default values: two rows of ones.
         self.assertAllClose([[1] * 5, [1] * 5], embed)
     else:
         self.assertEqual((2, 5), embed.shape)
     # Exactly one feature is tracked, in both the output and variable maps.
     self.assertEqual(['fea'], list(embed_map.keys()))
     self.assertEqual((2, 5), embed_map['fea'].shape)
     self.assertEqual(['fea'], list(embedder._variable_map.keys()))
Example 2
0
    def test_training_logistic(self):
        """Trains an embedding + softmax head and checks the loss decreases.

        Builds a two-feature SparseFeatureEmbedding feeding a 2-way softmax
        Dense layer, runs 10 manual SGD steps with a GradientTape, and
        asserts the categorical cross-entropy loss drops from its initial
        value.
        """
        self._config.gradient_descent_config.learning_rate = 0.05
        fea_embed = sparse_features.SparseFeatureEmbedding(
            self._config, {
                'weather': (10, 2),
                'day_of_week': (10, 2)
            },
            op_name='multiple_feature',
            service_address=self._kbs_address)

        model = tf.keras.models.Sequential(
            [fea_embed,
             tf.keras.layers.Dense(2, activation='softmax')])
        model.compile(optimizer='sgd',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        # Create an optimizer.
        optimizer = tf.keras.optimizers.SGD(learning_rate=0.05)

        x_train = {
            'weather': [['cold', 'day'], ['hot', ''], ['warm', 'day'],
                        ['warm', '']],
            'day_of_week': [['monday', 'day'], ['tuesday', 'day'],
                            ['sunday', ''], ['saturday', '']],
        }
        y_train = np.array([[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])
        # Test the shape of model's output.
        self.assertEqual((4, 2), model(x_train).shape)
        # The final Dense layer applies softmax, so the model outputs
        # probabilities, not logits; from_logits must therefore be False.
        loss_layer = tf.keras.losses.CategoricalCrossentropy(from_logits=False)

        # Keras losses take (y_true, y_pred) in that order.
        init_loss = loss_layer(y_train, model(x_train))
        for _ in range(10):
            with tf.GradientTape() as tape:
                probs = model(x_train)
                loss = loss_layer(y_train, probs)
            grads = tape.gradient(loss, model.trainable_weights)
            # Update the trainable variables w.r.t. the logistic loss
            optimizer.apply_gradients(zip(grads, model.trainable_weights))
            print('===>loss: ', loss.numpy())
        # Checks the loss is dropped after 10 steps of training.
        final_loss = loss_layer(y_train, model(x_train))
        self.assertLess(final_loss.numpy(), init_loss.numpy())
Example 3
0
 def test_multiple_feature_lookup_2D_with_sigma(self):
     """Looks up two sigma-bearing features from 2-D input batches.

     Verifies the concatenated embedding shape (5 + 10 dims per row) and
     that both features appear in the per-feature and variable maps.
     """
     embedder = sparse_features.SparseFeatureEmbedding(
         self._config, {
             'fea1': (5, 1),
             'fea2': (10, 1)
         },
         op_name='multiple_feature2',
         service_address=self._kbs_address)
     inputs = {
         'fea1': [['input1', ''], ['input2', '']],
         'fea2': [['input3', 'input5'], ['input4', 'input6']]
     }
     embed, _, _, embed_map = embedder.lookup(inputs)
     # Per-row concatenation of the two feature embeddings: 5 + 10 = 15.
     self.assertEqual((2, 15), embed.shape)
     self.assertLen(embed_map.keys(), 2)
     for fea in ('fea1', 'fea2'):
         self.assertIn(fea, embed_map.keys())
     self.assertEqual((2, 2, 5), embed_map['fea1'].shape)
     self.assertEqual((2, 2, 10), embed_map['fea2'].shape)
     self.assertLen(embedder._variable_map.keys(), 2)
     for fea in ('fea1', 'fea2'):
         self.assertIn(fea, embedder._variable_map.keys())
Example 4
0
 def test_multiple_feature_lookup_2D_without_sigma(self):
     """Looks up two sigma-free features from 2-D input batches.

     With sigma == 0 the embedding values are deterministic (all ones), so
     the concatenated output can be checked exactly, not just by shape.
     """
     de_config = test_util.default_de_config(5, [1] * 5)
     embedder = sparse_features.SparseFeatureEmbedding(
         de_config, {
             'fea1': (5, 0),
             'fea2': (5, 0)
         },
         op_name='multiple_feature3',
         service_address=self._kbs_address)
     inputs = {
         'fea1': [['input1', ''], ['input2', '']],
         'fea2': [['input3', 'input5'], ['input4', 'input6']]
     }
     embed, _, _, embed_map = embedder.lookup(inputs)
     # Two 5-dim all-ones embeddings concatenated per row -> 10 ones each.
     self.assertAllClose([[1] * 10, [1] * 10], embed.numpy())
     self.assertLen(embed_map.keys(), 2)
     for fea in ('fea1', 'fea2'):
         self.assertIn(fea, embed_map.keys())
         self.assertEqual((2, 2, 5), embed_map[fea].shape)
     self.assertLen(embedder._variable_map.keys(), 2)
     for fea in ('fea1', 'fea2'):
         self.assertIn(fea, embedder._variable_map.keys())