Example #1

import tensorflow as tf
from distutils.version import LooseVersion


def input_layer(features, feature_columns):
    # Compare parsed versions: plain string comparison is unreliable
    # (e.g. "1.9.0" < "1.15.0" is False lexicographically).
    if LooseVersion(tf.__version__) < LooseVersion("1.15.0"):
        from tensorflow.python.feature_column import feature_column_v2 as fc
        return fc.FeatureLayer(feature_columns)(features)
    elif LooseVersion(tf.__version__) < LooseVersion("2.0.0"):
        return tf.feature_column.input_layer(features, feature_columns)
    else:
        return tf.compat.v1.feature_column.input_layer(features,
                                                       feature_columns)
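
A minimal usage sketch for the helper above; the 'price' feature name and values are illustrative, and under TF 2.x the compat.v1 branch may additionally require disabling eager execution (tf.compat.v1.disable_eager_execution()):

import tensorflow as tf

# Illustrative feature dict and a matching numeric column.
features = {'price': tf.constant([[1.0], [2.0], [3.0]])}
columns = [tf.feature_column.numeric_column('price')]
dense = input_layer(features, columns)  # dense tensor of shape (3, 1)
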
    def test_sequential_model(self):
        columns = [fc.numeric_column('a')]
        model = keras.models.Sequential([
            fc.FeatureLayer(columns),
            keras.layers.Dense(64, activation='relu'),
            keras.layers.Dense(20, activation='softmax')
        ])
        model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        x = {'a': np.random.random((10, 1))}
        y = np.random.randint(20, size=(10, 1))
        y = keras.utils.to_categorical(y, num_classes=20)
        model.fit(x, y, epochs=1, batch_size=5)
        model.fit(x, y, epochs=1, batch_size=5)
        model.evaluate(x, y, batch_size=5)
        model.predict(x, batch_size=5)

    def test_sequential_model_with_ds_input(self):
        columns = [fc.numeric_column('a')]
        model = keras.models.Sequential([
            fc.FeatureLayer(columns),
            keras.layers.Dense(64, activation='relu'),
            keras.layers.Dense(20, activation='softmax')
        ])
        model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        y = np.random.randint(20, size=(100, 1))
        y = keras.utils.to_categorical(y, num_classes=20)
        x = {'a': np.random.random((100, 1))}
        ds1 = dataset_ops.Dataset.from_tensor_slices(x)
        ds2 = dataset_ops.Dataset.from_tensor_slices(y)
        ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
        model.fit(ds, steps_per_epoch=1)
        model.fit(ds, steps_per_epoch=1)
        model.evaluate(ds, steps=1)
        model.predict(ds, steps=1)
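
As an aside, zipping two single-tensor datasets as above can be collapsed into one call, which is the more common tf.data idiom; a sketch using the same x and y:

        ds = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(5)
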
    def DISABLED_test_function_model_feature_layer_input(self):
        col_a = fc.numeric_column('a')
        col_b = fc.numeric_column('b')

        feature_layer = fc.FeatureLayer([col_a, col_b], name='fc')
        dense = keras.layers.Dense(4)

        # This seems problematic.... We probably need something for FeatureLayer
        # the way Input is for InputLayer.
        output = dense(feature_layer)

        model = keras.models.Model([feature_layer], [output])

        optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        loss_weights = [1., 0.5]
        model.compile(optimizer,
                      loss,
                      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
                      loss_weights=loss_weights)

        data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
        print(model.fit(*data, epochs=1))
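
The comment in this disabled test names the gap: a FeatureLayer instance is a layer, not a symbolic tensor, so passing it to Dense and to Model cannot work. A sketch of the pattern later Keras APIs settled on, assuming one keras.Input per raw feature (shapes and names here are illustrative, not from the original test):

        inputs = {
            'a': keras.Input(shape=(1,), name='a'),
            'b': keras.Input(shape=(1,), name='b'),
        }
        net = feature_layer(inputs)      # columns resolve against the dict
        output = keras.layers.Dense(4)(net)
        model = keras.models.Model(inputs=inputs, outputs=output)
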
Example #5

    def __init__(self,
                 units,
                 hidden_units,
                 feature_columns,
                 activation_fn,
                 dropout,
                 input_layer_partitioner,
                 batch_norm,
                 name=None,
                 **kwargs):
        super(_DNNModel, self).__init__(name=name, **kwargs)
        if feature_column_v2.is_feature_column_v2(feature_columns):
            self._input_layer = feature_column_v2.FeatureLayer(
                feature_columns=feature_columns, name='input_layer')
        else:
            self._input_layer = feature_column.InputLayer(
                feature_columns=feature_columns,
                name='input_layer',
                create_scope_now=False)

        self._add_layer(self._input_layer, 'input_layer')

        self._dropout = dropout
        self._batch_norm = batch_norm

        self._hidden_layers = []
        self._dropout_layers = []
        self._batch_norm_layers = []
        self._hidden_layer_scope_names = []
        for layer_id, num_hidden_units in enumerate(hidden_units):
            with variable_scope.variable_scope('hiddenlayer_%d' %
                                               layer_id) as hidden_layer_scope:
                hidden_layer = core_layers.Dense(
                    units=num_hidden_units,
                    activation=activation_fn,
                    kernel_initializer=init_ops.glorot_uniform_initializer(),
                    name=hidden_layer_scope,
                    _scope=hidden_layer_scope)
                self._add_layer(hidden_layer, hidden_layer_scope.name)
                self._hidden_layer_scope_names.append(hidden_layer_scope.name)
                self._hidden_layers.append(hidden_layer)
                if self._dropout is not None:
                    dropout_layer = core_layers.Dropout(rate=self._dropout)
                    self._add_layer(dropout_layer, dropout_layer.name)
                    self._dropout_layers.append(dropout_layer)
                if self._batch_norm:
                    batch_norm_layer = normalization.BatchNormalization(
                        # The default momentum 0.99 actually crashes on certain
                        # problems, so here we use 0.999, which is the default of
                        # tf.contrib.layers.batch_norm.
                        momentum=0.999,
                        trainable=True,
                        name='batchnorm_%d' % layer_id,
                        _scope='batchnorm_%d' % layer_id)
                    self._add_layer(batch_norm_layer, batch_norm_layer.name)
                    self._batch_norm_layers.append(batch_norm_layer)

        with variable_scope.variable_scope('logits') as logits_scope:
            self._logits_layer = core_layers.Dense(
                units=units,
                activation=None,
                kernel_initializer=init_ops.glorot_uniform_initializer(),
                name=logits_scope,
                _scope=logits_scope)
            self._add_layer(self._logits_layer, logits_scope.name)
            self._logits_scope_name = logits_scope.name
        self._input_layer_partitioner = input_layer_partitioner
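
The constructor above only builds the layers. The class's call method is not shown here; a sketch of how the forward pass would plausibly chain them, with the dropout/batch-norm ordering assumed to mirror tf.estimator's DNN:

    def call(self, features, training=False):
        # Assumed forward pass over the layers built in __init__ above.
        net = self._input_layer(features)
        for i, hidden_layer in enumerate(self._hidden_layers):
            net = hidden_layer(net)
            if self._dropout is not None:
                net = self._dropout_layers[i](net, training=training)
            if self._batch_norm:
                net = self._batch_norm_layers[i](net, training=training)
        return self._logits_layer(net)
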
    def __init__(self, feature_columns, units, name=None, **kwargs):
        super(TestDNNModel, self).__init__(name=name, **kwargs)
        self._input_layer = fc.FeatureLayer(feature_columns,
                                            name='input_layer')
        self._dense_layer = keras.layers.Dense(units, name='dense_layer')
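
For this two-layer test model, the corresponding forward pass would plausibly be just (again an assumption, since the call method is not shown):

    def call(self, features):
        # Assumed: dense tensor from the feature columns, then the dense head.
        net = self._input_layer(features)
        return self._dense_layer(net)
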
    def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
        col_a = fc.numeric_column('a')
        col_b = fc.numeric_column('b')
        col_c = fc.numeric_column('c')

        fc1 = fc.FeatureLayer([col_a, col_b], name='fc1')
        fc2 = fc.FeatureLayer([col_b, col_c], name='fc2')
        dense = keras.layers.Dense(4)

        # This seems problematic.... We probably need something for FeatureLayer
        # the way Input is for InputLayer.
        output = dense(fc1) + dense(fc2)

        model = keras.models.Model([fc1, fc2], [output])

        optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        loss_weights = [1., 0.5]
        model.compile(optimizer,
                      loss,
                      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
                      loss_weights=loss_weights)

        data_list = ([{
            'a': np.arange(10),
            'b': np.arange(10)
        }, {
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 20))  # 10 labels to match the 10 input rows
        print(model.fit(*data_list, epochs=1))

        data_bloated_list = ([{
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }, {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 20))
        print(model.fit(*data_bloated_list, epochs=1))

        data_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10)
            },
            'fc2': {
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 20))
        print(model.fit(*data_dict, epochs=1))

        data_bloated_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            },
            'fc2': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 20))
        print(model.fit(*data_bloated_dict, epochs=1))