# Example #1
def model_fn_dense(features, labels, mode, params):
    """xDeepFM-style model_fn combining linear, deep and CIN components.

    Args:
        features: feature dict consumed by the feature columns.
        labels: unused here; present to satisfy the estimator model_fn
            signature.
        mode: a tf.estimator.ModeKeys value (controls train-time behavior
            inside stack_dense_layer).
        params: hyperparameter dict with 'hidden_units', 'dropout_rate',
            'batch_norm' and 'cin_layer_size'.

    Returns:
        Logit tensor of shape (batch, 1).
    """
    dense_feature, sparse_feature = build_features()
    deep_in = tf.feature_column.input_layer(features, dense_feature)
    wide_in = tf.feature_column.input_layer(features, sparse_feature)

    # Linear (wide) part over the sparse features.
    with tf.variable_scope('Linear_component'):
        linear_output = tf.layers.dense(wide_in, units=1)
        add_layer_summary('linear_output', linear_output)

    # Deep part: stacked fully-connected layers over the dense embeddings.
    dense_output = stack_dense_layer(
        deep_in, params['hidden_units'], params['dropout_rate'],
        params['batch_norm'], mode, add_summary=True)

    # CIN part: recover the (batch, field, emb) layout from the flat input.
    # NOTE(review): assumes every dense feature shares one embedding size —
    # confirm against build_features.
    n_field = len(dense_feature)
    emb_dim = dense_feature[0].variable_shape.as_list()[-1]
    embedding_matrix = tf.reshape(deep_in, [-1, n_field, emb_dim])
    add_layer_summary('embedding_matrix', embedding_matrix)

    cin_output = cin_layer(embedding_matrix, params['cin_layer_size'],
                           emb_dim, n_field)

    # Concatenate all three component outputs and project to one logit.
    with tf.variable_scope('output'):
        y = tf.concat([dense_output, cin_output, linear_output], axis=1)
        y = tf.layers.dense(y, units=1)
        add_layer_summary('output', y)

    return y
# Example #2
def model_fn_dense(features, labels, mode, params):
    """DeepFM-style model_fn: linear + FM second-order + deep components.

    Args:
        features: feature dict consumed by the feature columns.
        labels: unused here; part of the estimator model_fn signature.
        mode: a tf.estimator.ModeKeys value (switches batch-norm/dropout
            into training behavior).
        params: hyperparameter dict with 'hidden_units' and 'dropout_rate'.

    Returns:
        Logit tensor of shape (batch, 1), the elementwise sum of the three
        component outputs.
    """
    dense_feature, sparse_feature = build_features()
    deep_net = tf.feature_column.input_layer(features, dense_feature)
    sparse_in = tf.feature_column.input_layer(features, sparse_feature)

    with tf.variable_scope('FM_component'):
        # First-order (linear) term over the sparse features.
        with tf.variable_scope('Linear'):
            linear_output = tf.layers.dense(sparse_in, units=1)
            add_layer_summary('linear_output', linear_output)

        with tf.variable_scope('second_order'):
            # Reshape (batch, n_feature * emb) -> (batch, n_feature, emb).
            # All features are assumed to share one embedding dimension.
            emb_dim = dense_feature[0].variable_shape.as_list()[0]
            embedding_matrix = tf.reshape(
                deep_net, (-1, len(dense_feature), emb_dim))
            add_layer_summary('embedding_matrix', embedding_matrix)

            # FM identity: 0.5 * ((sum_i v_i)^2 - sum_i v_i^2).
            # Compared to plain FM, the embedding here is flatten(x * v),
            # not v itself.
            sum_square = tf.pow(tf.reduce_sum(embedding_matrix, axis=1), 2)
            square_sum = tf.reduce_sum(tf.pow(embedding_matrix, 2), axis=1)
            fm_output = tf.reduce_sum(
                tf.subtract(sum_square, square_sum) * 0.5,
                axis=1,
                keepdims=True)
            add_layer_summary('fm_output', fm_output)

    with tf.variable_scope('Deep_component'):
        # Dense -> batch-norm -> dropout, repeated per configured layer.
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        for i, unit in enumerate(params['hidden_units']):
            deep_net = tf.layers.dense(deep_net,
                                       units=unit,
                                       activation='relu',
                                       name='dense{}'.format(i))
            deep_net = tf.layers.batch_normalization(
                deep_net,
                center=True,
                scale=True,
                trainable=True,
                training=is_training)
            deep_net = tf.layers.dropout(deep_net,
                                         rate=params['dropout_rate'],
                                         training=is_training)
            add_layer_summary(deep_net.name, deep_net)

    with tf.variable_scope('output'):
        y = deep_net + fm_output + linear_output
        add_layer_summary('output', y)

    return y
# Example #3
def model_fn_dense(features, labels, mode, params):
    """FiBiNET-style model_fn: linear + SENET/bilinear interaction + deep.

    Args:
        features: feature dict consumed by the feature columns.
        labels: unused here; part of the estimator model_fn signature.
        mode: a tf.estimator.ModeKeys value (controls train-time behavior
            inside stack_dense_layer).
        params: hyperparameter dict with 'pool_op', 'senet_ratio',
            'model_type', 'hidden_units', 'dropout_rate', 'batch_norm'.

    Returns:
        Logit tensor of shape (batch, 1): deep output plus linear output.
    """
    dense_feature, sparse_feature = build_features()
    deep_in = tf.feature_column.input_layer(features, dense_feature)
    wide_in = tf.feature_column.input_layer(features, sparse_feature)

    # Linear (wide) part over the sparse features.
    with tf.variable_scope('Linear_component'):
        linear_output = tf.layers.dense(wide_in, units=1)
        add_layer_summary('linear_output', linear_output)

    # Recover (batch, field, emb) layout from the flat dense input.
    # NOTE(review): assumes all dense features share one embedding size.
    n_field = len(dense_feature)
    emb_dim = dense_feature[0].variable_shape.as_list()[-1]
    embedding_matrix = tf.reshape(deep_in, [-1, n_field, emb_dim])

    # SENET layer re-weights the fields to produce a second embedding matrix.
    senet_embedding_matrix = SENET_layer(embedding_matrix,
                                         n_field,
                                         emb_dim,
                                         pool_op=params['pool_op'],
                                         ratio=params['senet_ratio'])

    # Bilinear interaction on both the original and SENET embeddings,
    # then concatenate into the combination layer.
    BI_org = Bilinear_layer(embedding_matrix,
                            n_field,
                            emb_dim,
                            type=params['model_type'],
                            name='org')
    BI_senet = Bilinear_layer(senet_embedding_matrix,
                              n_field,
                              emb_dim,
                              type=params['model_type'],
                              name='senet')
    combination_layer = tf.concat([BI_org, BI_senet], axis=1)

    # Deep part: stacked fully-connected layers over the combination layer.
    dense_output = stack_dense_layer(combination_layer,
                                     params['hidden_units'],
                                     params['dropout_rate'],
                                     params['batch_norm'],
                                     mode,
                                     add_summary=True)

    with tf.variable_scope('output'):
        y = dense_output + linear_output
        add_layer_summary('output', y)

    return y