    def DISABLED_test_function_model_feature_layer_input(self):
        col_a = fc.numeric_column_v2('a')
        col_b = fc.numeric_column_v2('b')

        feature_layer = fc.DenseFeatures([col_a, col_b], name='fc')
        dense = keras.layers.Dense(4)

        # This seems problematic: `dense` is called on the DenseFeatures layer
        # itself rather than on a tensor. We probably need something for
        # DenseFeatures the way Input is for InputLayer.
        output = dense(feature_layer)

        model = keras.models.Model([feature_layer], [output])

        optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        loss_weights = [1., 0.5]
        model.compile(optimizer,
                      loss,
                      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
                      loss_weights=loss_weights)

        data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
        print(model.fit(*data, epochs=1))
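
# A minimal working sketch of the pattern the disabled test above is reaching
# for, assuming the public TF 2.x Keras API: feed symbolic keras.Input tensors
# into DenseFeatures (one per raw feature) instead of passing the layer object
# itself into downstream layers.
import numpy as np
import tensorflow as tf

col_a = tf.feature_column.numeric_column('a')
col_b = tf.feature_column.numeric_column('b')

# One symbolic input per raw feature; DenseFeatures consumes the whole dict.
inputs = {
    'a': tf.keras.Input(shape=(1,), name='a'),
    'b': tf.keras.Input(shape=(1,), name='b'),
}
features = tf.keras.layers.DenseFeatures([col_a, col_b], name='fc')(inputs)
output = tf.keras.layers.Dense(4)(features)

model = tf.keras.Model(inputs=inputs, outputs=output)
model.compile(optimizer='rmsprop', loss='mse')
x = {'a': np.arange(10, dtype=np.float32).reshape(-1, 1),
     'b': np.arange(10, dtype=np.float32).reshape(-1, 1)}
y = np.arange(10, 20, dtype=np.float32).reshape(-1, 1)
model.fit(x, y, epochs=1)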
Example #2
    def testDenseFeatures_shareAcrossApplication(self):
        features = {
            "image": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
                      [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
        }
        feature_columns = [
            hub.image_embedding_column("image",
                                       self.randomly_initialized_spec),
        ]
        if not feature_column_v2.is_feature_column_v2(feature_columns):
            self.skipTest(
                "Resources not implemented in the state manager of feature "
                "column v2.")
        with tf.Graph().as_default():
            feature_layer = feature_column_v2.DenseFeatures(feature_columns)
            feature_layer_out_1 = feature_layer(features)
            feature_layer_out_2 = feature_layer(features)

            with tf_v1.train.MonitoredSession() as sess:
                output_1 = sess.run(feature_layer_out_1)
                output_2 = sess.run(feature_layer_out_2)

                self.assertAllClose(output_1, output_2)
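
# A hedged, self-contained sketch (assumed toy setup, not the hub-based test
# data above) of why applying one DenseFeatures layer twice shares state: the
# embedding table is created on the first call and reused on the second.
import tensorflow as tf

emb_col = tf.feature_column.embedding_column(
    tf.feature_column.categorical_column_with_identity('id', num_buckets=10),
    dimension=4)
layer = tf.keras.layers.DenseFeatures([emb_col])

features = {'id': tf.constant([[1], [2]], dtype=tf.int64)}
out_1 = layer(features)
out_2 = layer(features)

assert len(layer.variables) == 1  # one shared embedding table
tf.debugging.assert_near(out_1, out_2)  # identical outputs from shared weights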
Example #3
    def test_sequence_example_into_input_layer(self):
        examples = [_make_sequence_example().SerializeToString()] * 100
        ctx_cols, seq_cols = self._build_feature_columns()

        def _parse_example(example):
            ctx, seq = parsing_ops.parse_single_sequence_example(
                example,
                context_features=fc.make_parse_example_spec_v2(ctx_cols),
                sequence_features=fc.make_parse_example_spec_v2(seq_cols))
            ctx.update(seq)
            return ctx

        ds = dataset_ops.Dataset.from_tensor_slices(examples)
        ds = ds.map(_parse_example)
        ds = ds.batch(20)

        # Test on a single batch
        features = ds.make_one_shot_iterator().get_next()

        # Tile the context features across the sequence features
        sequence_input_layer = sfc.SequenceFeatures(seq_cols)
        seq_layer, _ = sequence_input_layer(features)
        input_layer = fc.DenseFeatures(ctx_cols)
        ctx_layer = input_layer(features)
        input_layer = sfc.concatenate_context_input(ctx_layer, seq_layer)

        rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
        output = rnn_layer(input_layer)

        with self.cached_session() as sess:
            sess.run(variables.global_variables_initializer())
            features_r = sess.run(features)
            self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])

            output_r = sess.run(output)
            self.assertAllEqual(output_r.shape, [20, 10])
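
# A conceptual sketch of what sfc.concatenate_context_input does above; the
# helper's exact concatenation order is an implementation detail, so treat
# this as illustrative: tile each example's context vector across the padded
# time dimension, then concatenate it with the per-step sequence features.
import tensorflow as tf

def tile_context_over_time(seq, ctx):
    """seq: [batch, time, d_seq]; ctx: [batch, d_ctx] -> [batch, time, d_seq + d_ctx]."""
    time_steps = tf.shape(seq)[1]
    ctx_tiled = tf.tile(tf.expand_dims(ctx, 1), [1, time_steps, 1])
    return tf.concat([seq, ctx_tiled], axis=-1)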
Example #4
    def test_from_config(self, trainable, name):
        cols = [
            fc.numeric_column('a'),
            fc.embedding_column(fc.categorical_column_with_vocabulary_list(
                'b', vocabulary_list=['1', '2', '3']),
                                dimension=2),
            fc.indicator_column(
                fc.categorical_column_with_hash_bucket(key='c',
                                                       hash_bucket_size=3))
        ]
        orig_layer = fc.DenseFeatures(cols, trainable=trainable, name=name)
        config = orig_layer.get_config()

        new_layer = fc.DenseFeatures.from_config(config)

        self.assertEqual(new_layer.name, orig_layer.name)
        self.assertEqual(new_layer.trainable, trainable)
        self.assertLen(new_layer._feature_columns, 3)
        self.assertEqual(new_layer._feature_columns[0].name, 'a')
        self.assertEqual(new_layer._feature_columns[1].initializer.mean, 0.0)
        self.assertEqual(new_layer._feature_columns[1].categorical_column.name,
                         'b')
        self.assertIsInstance(new_layer._feature_columns[2],
                              fc.IndicatorColumn)
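
# A brief usage sketch of the round trip this test exercises, via the public
# tf.keras API: get_config() yields a serializable dict, and from_config()
# rebuilds an equivalent layer from it.
import tensorflow as tf

cols = [tf.feature_column.numeric_column('a')]
layer = tf.keras.layers.DenseFeatures(cols, trainable=False, name='dense_feats')

config = layer.get_config()  # a plain dict, safe to serialize
clone = tf.keras.layers.DenseFeatures.from_config(config)
assert clone.name == layer.name and clone.trainable is False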
Example #5
    def __init__(self, feature_columns, units, name=None, **kwargs):
        super(TestDNNModel, self).__init__(name=name, **kwargs)
        self._input_layer = fc.DenseFeatures(feature_columns,
                                             name='input_layer')
        self._dense_layer = keras.layers.Dense(units, name='dense_layer')
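
    # A hedged sketch of the call() this fragment implies (the method itself
    # was not captured in the excerpt): DenseFeatures maps the raw feature dict
    # to a single dense tensor, which then feeds the Dense layer.
    def call(self, features):
        net = self._input_layer(features)
        return self._dense_layer(net)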
Example #6
    def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
        col_a = fc.numeric_column_v2('a')
        col_b = fc.numeric_column_v2('b')
        col_c = fc.numeric_column_v2('c')

        fc1 = fc.DenseFeatures([col_a, col_b], name='fc1')
        fc2 = fc.DenseFeatures([col_b, col_c], name='fc2')
        dense = keras.layers.Dense(4)

        # This seems problematic: `dense` is called on the DenseFeatures layers
        # themselves rather than on tensors. We probably need something for
        # DenseFeatures the way Input is for InputLayer.
        output = dense(fc1) + dense(fc2)

        model = keras.models.Model([fc1, fc2], [output])

        optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        loss_weights = [1., 0.5]
        model.compile(optimizer,
                      loss,
                      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
                      loss_weights=loss_weights)

        data_list = ([{
            'a': np.arange(10),
            'b': np.arange(10)
        }, {
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 20))
        print(model.fit(*data_list, epochs=1))

        data_bloated_list = ([{
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }, {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 20))
        print(model.fit(*data_bloated_list, epochs=1))

        data_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10)
            },
            'fc2': {
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 20))
        print(model.fit(*data_dict, epochs=1))

        data_bloated_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            },
            'fc2': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 20))
        print(model.fit(*data_bloated_dict, epochs=1))
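
# A hedged sketch of a working multi-group variant, again via the TF 2.x Keras
# API: both DenseFeatures layers read from one shared dict of symbolic inputs,
# so the overlapping feature 'b' is declared only once and no bloated or
# nested feeding scheme is needed.
import numpy as np
import tensorflow as tf

col_a = tf.feature_column.numeric_column('a')
col_b = tf.feature_column.numeric_column('b')
col_c = tf.feature_column.numeric_column('c')

inputs = {k: tf.keras.Input(shape=(1,), name=k) for k in ('a', 'b', 'c')}
fc1_out = tf.keras.layers.DenseFeatures([col_a, col_b], name='fc1')(inputs)
fc2_out = tf.keras.layers.DenseFeatures([col_b, col_c], name='fc2')(inputs)

dense = tf.keras.layers.Dense(4)  # shared across both feature groups
output = dense(fc1_out) + dense(fc2_out)

model = tf.keras.Model(inputs=inputs, outputs=output)
model.compile(optimizer='rmsprop', loss='mse')
x = {k: np.arange(10, dtype=np.float32).reshape(-1, 1) for k in ('a', 'b', 'c')}
y = np.arange(10, 20, dtype=np.float32).reshape(-1, 1)
model.fit(x, y, epochs=1)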
Example #7
    def __init__(self,
                 units,
                 hidden_units,
                 feature_columns,
                 activation_fn,
                 dropout,
                 input_layer_partitioner,
                 batch_norm,
                 name=None,
                 **kwargs):
        super(_DNNModel, self).__init__(name=name, **kwargs)
        if feature_column_v2.is_feature_column_v2(feature_columns):
            self._input_layer = feature_column_v2.DenseFeatures(
                feature_columns=feature_columns, name='input_layer')
        else:
            self._input_layer = feature_column.InputLayer(
                feature_columns=feature_columns,
                name='input_layer',
                create_scope_now=False)

        self._add_layer(self._input_layer, 'input_layer')

        self._dropout = dropout
        self._batch_norm = batch_norm

        self._hidden_layers = []
        self._dropout_layers = []
        self._batch_norm_layers = []
        self._hidden_layer_scope_names = []
        for layer_id, num_hidden_units in enumerate(hidden_units):
            with variable_scope.variable_scope('hiddenlayer_%d' %
                                               layer_id) as hidden_layer_scope:
                hidden_layer = core_layers.Dense(
                    units=num_hidden_units,
                    activation=activation_fn,
                    kernel_initializer=init_ops.glorot_uniform_initializer(),
                    name=hidden_layer_scope,
                    _scope=hidden_layer_scope)
                self._add_layer(hidden_layer, hidden_layer_scope.name)
                self._hidden_layer_scope_names.append(hidden_layer_scope.name)
                self._hidden_layers.append(hidden_layer)
                if self._dropout is not None:
                    dropout_layer = core_layers.Dropout(rate=self._dropout)
                    self._add_layer(dropout_layer, dropout_layer.name)
                    self._dropout_layers.append(dropout_layer)
                if self._batch_norm:
                    batch_norm_layer = normalization.BatchNormalization(
                        # The default momentum of 0.99 actually crashes on
                        # certain problems, so here we use 0.999, which is the
                        # default of tf.contrib.layers.batch_norm.
                        momentum=0.999,
                        trainable=True,
                        name='batchnorm_%d' % layer_id,
                        _scope='batchnorm_%d' % layer_id)
                    self._add_layer(batch_norm_layer, batch_norm_layer.name)
                    self._batch_norm_layers.append(batch_norm_layer)

        with variable_scope.variable_scope('logits') as logits_scope:
            self._logits_layer = core_layers.Dense(
                units=units,
                activation=None,
                kernel_initializer=init_ops.glorot_uniform_initializer(),
                name=logits_scope,
                _scope=logits_scope)
            self._add_layer(self._logits_layer, logits_scope.name)
            self._logits_scope_name = logits_scope.name
        self._input_layer_partitioner = input_layer_partitioner
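
    # A hedged sketch (not the estimator's verbatim code) of the forward pass
    # the constructor above sets up: features -> input layer -> each hidden
    # Dense layer with optional dropout and batch norm -> logits. `ModeKeys`
    # stands in for the estimator framework's mode constants.
    def call(self, features, mode):
        is_training = mode == ModeKeys.TRAIN
        net = self._input_layer(features)
        for i, hidden_layer in enumerate(self._hidden_layers):
            net = hidden_layer(net)
            if self._dropout is not None and is_training:
                net = self._dropout_layers[i](net, training=True)
            if self._batch_norm:
                net = self._batch_norm_layers[i](net, training=is_training)
        return self._logits_layer(net)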