# Example 1
    def test_feature_layer_cpu(self):
        """Shared TPU embedding columns (v2) yield correct dense and sequence lookups on CPU."""
        vocab_size = 3
        embedding_dim = 2

        # Sparse ids for the dense (non-sequence) column 'aaa':
        #   row 0 -> [2], row 1 -> [0, 1]
        sparse_a = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(2, 0, 1),
            dense_shape=(2, 2))
        # Sparse ids for the sequence column 'bbb':
        #   row 0 -> [2], row 1 -> [0, 1], row 2 -> [] (empty)
        sparse_b = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(2, 0, 1),
            dense_shape=(3, 2))
        input_features = {'aaa': sparse_a, 'bbb': sparse_b}

        # Fixed embedding table: one row per vocabulary id.
        embedding_values = (
            (1., 2.),   # id 0
            (3., 5.),   # id 1
            (7., 11.),  # id 2
        )

        def _initializer(shape, dtype, partition_info=None):
            # The framework must request an unpartitioned float32 table of
            # shape (vocab_size, embedding_dim); hand back the fixed values.
            self.assertAllEqual((vocab_size, embedding_dim), shape)
            self.assertEqual(dtypes.float32, dtype)
            self.assertIsNone(partition_info)
            return embedding_values

        # Dense column uses combiner='mean'.
        expected_lookups_a = (
            (7., 11.),  # row 0: ids [2] -> [7, 11]
            (2., 3.5),  # row 1: ids [0, 1] -> mean([1, 2], [3, 5]) = [2, 3.5]
        )
        # Sequence column pads every row out to max_sequence_length=2.
        expected_lookups_b = (
            ((7., 11.), (0., 0.)),  # row 0: ids [2] + padding
            ((1., 2.), (3., 5.)),   # row 1: ids [0, 1]
            ((0., 0.), (0., 0.)),   # row 2: empty, all padding
        )

        # One dense and one sequence categorical column share a single
        # embedding table through shared_embedding_columns_v2.
        categorical_a = fc_lib.categorical_column_with_identity(
            key='aaa', num_buckets=vocab_size)
        categorical_b = fc_lib.sequence_categorical_column_with_identity(
            key='bbb', num_buckets=vocab_size)
        embedding_a, embedding_b = tpu_fc.shared_embedding_columns_v2(
            [categorical_a, categorical_b],
            dimension=embedding_dim,
            initializer=_initializer,
            max_sequence_lengths=[0, 2])

        # Feed the sparse inputs through the feature layers; the dense layer
        # is invoked first so the shared table is created there.
        dense_layer = fc_lib.DenseFeatures([embedding_a])
        sequence_layer = fc_lib.SequenceFeatures([embedding_b])
        embedding_lookup_a = dense_layer(input_features)
        embedding_lookup_b = sequence_layer(input_features)

        # Exactly one (shared) table variable should exist.
        global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        self.assertItemsEqual(('aaa_bbb_shared_embedding:0', ),
                              tuple([v.name for v in global_vars]))
        embedding_var = global_vars[0]
        with _initialized_session():
            self.assertAllEqual(embedding_values, embedding_var.eval())
            self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
            self.assertAllEqual(expected_lookups_b,
                                embedding_lookup_b[0].eval())
# Example 2
    def test_feature_layer_cpu(self):
        """TPU embedding columns (v2), dense and sequence variants, looked up on CPU."""
        vocab_size = 3
        embedding_dim = 2

        # The same sparse ids feed both columns:
        #   row 0 -> [2], row 1 -> [0, 1], row 2 -> [], row 3 -> [1]
        sparse_input = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1), (3, 0)),
            values=(2, 0, 1, 1),
            dense_shape=(4, 2))

        # Fixed embedding table: one row per vocabulary id.
        embedding_values = (
            (1., 2.),   # id 0
            (3., 5.),   # id 1
            (7., 11.),  # id 2
        )

        def _initializer(shape, dtype, partition_info=None):
            # The framework must request an unpartitioned float32 table of
            # shape (vocab_size, embedding_dim); hand back the fixed values.
            self.assertAllEqual((vocab_size, embedding_dim), shape)
            self.assertEqual(dtypes.float32, dtype)
            self.assertIsNone(partition_info)
            return embedding_values

        # Dense lookups with combiner='mean'.
        expected_lookups = (
            (7., 11.),  # row 0: ids [2] -> [7, 11]
            (2., 3.5),  # row 1: ids [0, 1] -> mean([1, 2], [3, 5]) = [2, 3.5]
            (0., 0.),   # row 2: no ids -> zeros
            (3., 5.),   # row 3: ids [1] -> [3, 5]
        )
        # Sequence lookups, padded to max_sequence_length=2.
        expected_lookups_sequence = (
            ((7., 11.), (0., 0.)),  # row 0: ids [2] + padding
            ((1., 2.), (3., 5.)),   # row 1: ids [0, 1]
            ((0., 0.), (0., 0.)),   # row 2: empty, all padding
            ((3., 5.), (0., 0.)),   # row 3: ids [1] + padding
        )

        # One plain and one sequence categorical column, each wrapped in its
        # own (non-shared) TPU embedding column.
        categorical_column = fc_lib.categorical_column_with_identity(
            key='aaa', num_buckets=vocab_size)
        sequence_categorical_column = (
            fc_lib.sequence_categorical_column_with_identity(
                key='bbb', num_buckets=vocab_size))
        embedding_column = tpu_fc.embedding_column_v2(
            categorical_column,
            dimension=embedding_dim,
            initializer=_initializer)
        sequence_embedding_column = tpu_fc.embedding_column_v2(
            sequence_categorical_column,
            dimension=embedding_dim,
            initializer=_initializer,
            max_sequence_length=2)

        # Run the lookups; the dense layer is invoked first, so its table is
        # global_vars[0] below.
        features = {'aaa': sparse_input, 'bbb': sparse_input}
        dense_features = fc_lib.DenseFeatures([embedding_column])
        sequence_features = fc_lib.SequenceFeatures(
            [sequence_embedding_column])
        embedding_lookup = dense_features(features)
        sequence_embedding_lookup = sequence_features(features)

        # Each column creates its own table variable.
        global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        self.assertItemsEqual((
            'dense_features/aaa_embedding/embedding_weights:0',
            'sequence_features/bbb_embedding/embedding_weights:0',
        ), tuple([v.name for v in global_vars]))
        with _initialized_session():
            self.assertAllEqual(embedding_values, global_vars[0].eval())
            self.assertAllEqual(expected_lookups, embedding_lookup.eval())
            self.assertAllEqual(expected_lookups_sequence,
                                sequence_embedding_lookup[0].eval())
# Example 3
    def test_encode_features_sequence_column(self):
        """encode_features handles sequence embedding and sequence numeric columns."""
        with tf.Graph().as_default():
            vocab_size = 4
            embedding_dim = 2

            # Dense id matrix; -1 entries are padding and are ignored.
            input_seq_ids = np.array([
                [3, -1, -1],  # example 0: ids [3]
                [0, 1, -1],   # example 1: ids [0, 1]
            ])
            # Ragged numeric values [[1.], [2., 3.]] expressed sparsely.
            input_seq_nums = tf.sparse.SparseTensor(
                indices=[[0, 0], [1, 0], [1, 1]],
                values=[1., 2., 3.],
                dense_shape=(2, 3))

            input_features = {
                "seq_ids": input_seq_ids,
                "seq_nums": input_seq_nums,
            }

            # Fixed embedding table: one row per vocabulary id.
            embedding_values = (
                (1., 2.),   # id 0
                (3., 5.),   # id 1
                (7., 11.),  # id 2
                (9., 13.),  # id 3
            )

            # Sequence embeddings, zero-padded out to length 3.
            expected_seq_embed = [
                [[9., 13.], [0., 0.], [0., 0.]],  # example 0
                [[1., 2.], [3., 5.], [0., 0.]],   # example 1
            ]
            # Numeric sequence values, zero-padded out to length 3.
            expected_seq_nums = [
                [[1.], [0.], [0.]],  # example 0
                [[2.], [3.], [0.]],  # example 1
            ]

            # One sequence embedding column and one sequence numeric column.
            seq_categorical_column = (
                feature_column.sequence_categorical_column_with_identity(
                    key="seq_ids", num_buckets=vocab_size))
            seq_embed_column = feature_column.embedding_column(
                seq_categorical_column,
                dimension=embedding_dim,
                initializer=(
                    lambda shape, dtype, partition_info: embedding_values))
            seq_numeric_column = feature_column.sequence_numeric_column(
                "seq_nums")

            cols_to_tensors = feature_lib.encode_features(
                input_features, [seq_embed_column, seq_numeric_column],
                mode=tf.estimator.ModeKeys.EVAL)
            actual_seq_embed = cols_to_tensors[seq_embed_column]
            actual_seq_nums = cols_to_tensors[seq_numeric_column]

            # The embedding table is the first (and only expected) variable
            # created so far in this fresh graph.
            global_vars = tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
            embedding_var = global_vars[0]
            with tf.compat.v1.Session() as sess:
                sess.run(tf.compat.v1.global_variables_initializer())
                sess.run(tf.compat.v1.tables_initializer())
                self.assertAllEqual(embedding_values, embedding_var.eval())
                self.assertAllEqual(expected_seq_embed, actual_seq_embed)
                self.assertAllEqual(expected_seq_nums, actual_seq_nums)