Example #1

All four examples assume TensorFlow 1.x (import tensorflow as tf) plus the repository's shared helpers (sparse_embedding, sparse_linear, stack_dense_layer, add_layer_summary, and the model-specific layers). Each model_fn_sparse returns raw logits; the surrounding Estimator plumbing is not shown. This first variant combines a linear part, a deep part, and a CIN part, i.e. the xDeepFM architecture.
def model_fn_sparse(features, labels, mode, params):
    # hyper parameters
    data_params = params['data_params']
    field_size = data_params['field_size']
    feature_size = data_params['feature_size']
    embedding_size = data_params['embedding_size']

    # extract features
    feat_ids = tf.reshape(features['feat_ids'],
                          shape=[-1, field_size])  # batch * field_size
    feat_vals = tf.reshape(features['feat_vals'],
                           shape=[-1, field_size])  # batch * field_size
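    # e.g. with field_size=3, one batch of two samples might be
    #   feat_ids  = [[3, 17, 42], [5, 17, 99]]         (vocabulary indices)
    #   feat_vals = [[1.0, 0.5, 1.0], [1.0, 2.0, 1.0]]  (hypothetical values)
    # as produced by a LibSVM-style "id:val" input pipeline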

    # extract embedding
    with tf.variable_scope('extract_embedding'):
        embedding_matrix = sparse_embedding(
            feature_size,
            embedding_size,
            field_size,
            feat_ids,
            feat_vals,
            add_summary=True)  # (batch, field_size, embedding_size)
        dense_input = tf.reshape(
            embedding_matrix,
            [-1, field_size * embedding_size])  # (batch, field_size * embedding_size)

    # linear part
    linear_output = sparse_linear(feature_size,
                                  feat_ids,
                                  feat_vals,
                                  add_summary=True)

    # Deep part
    dense_output = stack_dense_layer(dense_input,
                                     params['hidden_units'],
                                     params['dropout_rate'],
                                     params['batch_norm'],
                                     mode,
                                     add_summary=True)
    # CIN part
    cin_output = cin_layer(embedding_matrix, params['cin_layer_size'],
                           embedding_size, field_size)

    # concat and output
    with tf.variable_scope('output'):
        y = tf.concat([dense_output, cin_output, linear_output], axis=1)
        y = tf.layers.dense(y, units=1)
        add_layer_summary('output', y)

    return y
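The cin_layer helper is not shown on this page. As a rough sketch of what a single CIN step computes, under the assumption that x0 holds the raw embeddings and xk the previous layer's feature maps (a hypothetical helper, not the repository's implementation):

import tensorflow as tf

def cin_step_sketch(x0, xk, next_size, name):
    """One Compressed Interaction Network step (sketch).
    x0: (batch, m, D) raw embeddings; xk: (batch, h, D) previous maps."""
    h = xk.get_shape().as_list()[1]
    m = x0.get_shape().as_list()[1]
    # outer product of field maps at each embedding slot d:
    # z[b, h, i, d] = xk[b, h, d] * x0[b, i, d]
    z = tf.einsum('bhd,bid->bhid', xk, x0)
    w = tf.get_variable(name, shape=[next_size, h, m],
                        initializer=tf.glorot_uniform_initializer())
    # compress the (h, m) interaction grid into next_size feature maps
    return tf.einsum('bhid,nhi->bnd', z, w)

xDeepFM then sum-pools each layer's feature maps over the embedding dimension and concatenates the pooled vectors to form cin_output.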
Example #2
File: NFM.py / Project: JerryCatLeung/CTR-2
def model_fn_sparse(features, labels, mode, params):
    # hyper parameters
    data_params = params['data_params']
    field_size = data_params['field_size']
    feature_size = data_params['feature_size']
    embedding_size = data_params['embedding_size']

    # extract features
    feat_ids = tf.reshape(features['feat_ids'],
                          shape=[-1, field_size])  # batch * field_size
    feat_vals = tf.reshape(features['feat_vals'],
                           shape=[-1, field_size])  # batch * field_size

    # extract embedding
    embedding_matrix = sparse_embedding(feature_size,
                                        embedding_size,
                                        field_size,
                                        feat_ids,
                                        feat_vals,
                                        add_summary=True)

    # linear output
    linear_output = sparse_linear(feature_size,
                                  feat_ids,
                                  feat_vals,
                                  add_summary=True)

    with tf.variable_scope('BI_Pooling'):
        # bi-interaction pooling: (sum_i v_i)^2 - sum_i v_i^2
        # = 2 * sum_{i<j} v_i * v_j, i.e. all pairwise interactions in O(N)
        sum_square = tf.pow(tf.reduce_sum(embedding_matrix, axis=1), 2)
        square_sum = tf.reduce_sum(tf.pow(embedding_matrix, 2), axis=1)
        dense = tf.subtract(sum_square, square_sum)  # (batch, embedding_size)
        add_layer_summary(dense.name, dense)

    # fully connected stacked dense layers
    dense = stack_dense_layer(dense,
                              params['hidden_units'],
                              dropout_rate=params['dropout_rate'],
                              batch_norm=params['batch_norm'],
                              mode=mode,
                              add_summary=True)

    with tf.variable_scope('output'):
        y = linear_output + dense
        add_layer_summary('output', y)

    return y
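The BI_Pooling block above rests on the factorization-machine identity (sum_i v_i)^2 - sum_i v_i^2 = 2 * sum_{i<j} v_i * v_j (element-wise). A quick NumPy check on toy values:

import numpy as np

rng = np.random.default_rng(0)
emb = rng.normal(size=(2, 4, 3))  # toy (batch, field_size, embedding_size)

fast = np.sum(emb, axis=1) ** 2 - np.sum(emb ** 2, axis=1)
slow = 2 * sum(emb[:, i] * emb[:, j]
               for i in range(4) for j in range(i + 1, 4))
assert np.allclose(fast, slow)  # identical up to floating point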
Example #3
File: AFM.py / Project: JerryCatLeung/CTR-2
def model_fn_sparse(features, labels, mode, params):
    # hyper parameters
    data_params = params['data_params']
    field_size = data_params['field_size']
    feature_size = data_params['feature_size']
    embedding_size = data_params['embedding_size']

    # extract features
    feat_ids = tf.reshape(features['feat_ids'],
                          shape=[-1, field_size])  # batch * field_size
    feat_vals = tf.reshape(features['feat_vals'],
                           shape=[-1, field_size])  # batch * field_size

    # extract embedding
    embedding_matrix = sparse_embedding(feature_size,
                                        embedding_size,
                                        field_size,
                                        feat_ids,
                                        feat_vals,
                                        add_summary=True)

    # linear output
    linear_output = sparse_linear(feature_size,
                                  feat_ids,
                                  feat_vals,
                                  add_summary=True)

    with tf.variable_scope('Elementwise_Interaction'):
        # build all N*(N-1)/2 pairwise element-wise products v_i * v_j (i < j)
        elementwise_list = []
        for i in range(field_size):
            vi = tf.gather(embedding_matrix, indices=i, axis=1,
                           name='vi')  # batch * emb_size
            for j in range(i + 1, field_size):
                vj = tf.gather(embedding_matrix, indices=j, axis=1,
                               name='vj')  # batch * emb_size
                elementwise_list.append(tf.multiply(vi, vj))  # batch * emb_size
        elementwise_matrix = tf.stack(
            elementwise_list)  # (N*(N-1)/2) * batch * emb_size
        elementwise_matrix = tf.transpose(
            elementwise_matrix, [1, 0, 2])  # batch * (N*(N-1)/2) * emb_size

    with tf.variable_scope('Attention_Net'):
        # two-layer MLP producing one attention logit per interaction pair
        dense = tf.layers.dense(elementwise_matrix,
                                units=params['attention_factor'],
                                activation=tf.nn.relu)  # batch * (N*(N-1)/2) * t
        add_layer_summary(dense.name, dense)
        attention_logit = tf.layers.dense(dense,
                                          units=1)  # batch * (N*(N-1)/2) * 1
        # normalize across the interaction pairs (axis=1); a softmax over the
        # size-1 last axis would collapse every weight to 1
        attention_weight = tf.nn.softmax(attention_logit, axis=1)
        add_layer_summary(attention_weight.name, attention_weight)

    with tf.variable_scope('Attention_pooling'):
        # attention-weighted sum over the interaction pairs
        interaction_output = tf.reduce_sum(tf.multiply(elementwise_matrix,
                                                       attention_weight),
                                           axis=1)  # batch * emb_size
        interaction_output = tf.layers.dense(interaction_output,
                                             units=1)  # batch * 1

    with tf.variable_scope('output'):
        y = interaction_output + linear_output
        add_layer_summary('output', y)

    return y
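The double loop above issues one tf.gather per operand of every pair. For reference, the same N*(N-1)/2 products can be built in one vectorized step from precomputed upper-triangle indices; a NumPy sketch (the index arrays carry over directly to tf.gather on axis=1):

import numpy as np

def pairwise_products(emb):
    """All v_i * v_j with i < j, in the same order as the loop above.
    emb: (batch, N, D) -> (batch, N*(N-1)/2, D)."""
    i_idx, j_idx = np.triu_indices(emb.shape[1], k=1)  # pairs with i < j
    return emb[:, i_idx, :] * emb[:, j_idx, :]

emb = np.random.default_rng(0).normal(size=(2, 4, 3))
assert pairwise_products(emb).shape == (2, 6, 3)  # 4 * 3 / 2 = 6 pairs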
Example #4

SENET reweighting plus bilinear field interactions identify this variant as the FiBiNET architecture.
def model_fn_sparse(features, labels, mode, params):
    # hyper parameters
    data_params = params['data_params']
    field_size = data_params['field_size']
    feature_size = data_params['feature_size']
    embedding_size = data_params['embedding_size']

    # extract features
    feat_ids = tf.reshape(features['feat_ids'],
                          shape=[-1, field_size])  # batch * field_size
    feat_vals = tf.reshape(features['feat_vals'],
                           shape=[-1, field_size])  # batch * field_size

    # extract embedding
    with tf.variable_scope('extract_embedding'):
        embedding_matrix = sparse_embedding(
            feature_size,
            embedding_size,
            field_size,
            feat_ids,
            feat_vals,
            add_summary=True)  # (batch, field_size, embedding_size)

    # linear part
    linear_output = sparse_linear(feature_size,
                                  feat_ids,
                                  feat_vals,
                                  add_summary=True)

    # SENET layer: squeeze-and-excitation reweighting of the embedding matrix
    senet_embedding_matrix = SENET_layer(embedding_matrix,
                                         field_size,
                                         embedding_size,
                                         pool_op=params['pool_op'],
                                         ratio=params['senet_ratio'])

    # combination layer & BI_interaction
    BI_org = Bilinear_layer(embedding_matrix,
                            field_size,
                            embedding_size,
                            type=params['model_type'],
                            name='org')
    BI_senet = Bilinear_layer(senet_embedding_matrix,
                              field_size,
                              embedding_size,
                              type=params['model_type'],
                              name='senet')

    combination_layer = tf.concat([BI_org, BI_senet], axis=1)

    # Deep part
    dense_output = stack_dense_layer(combination_layer,
                                     params['hidden_units'],
                                     params['dropout_rate'],
                                     params['batch_norm'],
                                     mode,
                                     add_summary=True)

    with tf.variable_scope('output'):
        y = dense_output + linear_output
        add_layer_summary('output', y)

    return y
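SENET_layer and Bilinear_layer are repository helpers not shown here. A minimal sketch of the SENET idea only, assuming mean pooling and a ReLU bottleneck (hypothetical code; the real helper takes pool_op and senet_ratio from params):

import tensorflow as tf

def senet_sketch(emb, field_size, ratio):
    """Squeeze-and-excitation over fields: pool each field's embedding to a
    scalar, pass the field descriptor through a bottleneck MLP, then rescale
    each field's embedding by its learned weight. emb: (batch, field_size, D)."""
    z = tf.reduce_mean(emb, axis=2)                      # squeeze: (batch, field_size)
    a = tf.layers.dense(z, units=max(1, field_size // ratio),
                        activation=tf.nn.relu)           # bottleneck
    a = tf.layers.dense(a, units=field_size,
                        activation=tf.nn.relu)           # excite: per-field weights
    return emb * tf.expand_dims(a, axis=-1)              # reweight the embeddings

Bilinear_layer then crosses field pairs through learned weight matrices; both the original and the reweighted embeddings pass through it before the two outputs are concatenated.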