Example #1
0
def model_fn_dense(features, labels, mode, params):
    """NFM-style model_fn: a linear term over the sparse inputs added to an
    MLP fed by bi-interaction pooling of the dense-feature embeddings.

    Args:
        features: dict of input tensors keyed by feature name.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'hidden_units', 'dropout_rate', 'batch_norm'.

    Returns:
        Logit tensor combining the linear and deep outputs.
    """
    dense_feature, sparse_feature = build_features()
    deep_input = tf.feature_column.input_layer(features, dense_feature)
    wide_input = tf.feature_column.input_layer(features, sparse_feature)

    # Restore the (batch, field_size, emb_size) layout from the flat input.
    num_fields = len(dense_feature)
    emb_dim = dense_feature[0].variable_shape.as_list()[-1]
    embedding_matrix = tf.reshape(deep_input, [-1, num_fields, emb_dim])

    with tf.variable_scope('Linear_output'):
        linear_output = tf.layers.dense(wide_input, units=1)
        add_layer_summary('linear_output', linear_output)

    with tf.variable_scope('BI_Pooling'):
        # (sum e)^2 - sum(e^2): twice the sum of pairwise field interactions.
        pooled = tf.reduce_sum(embedding_matrix, axis=1)
        sum_square = tf.pow(pooled, 2)
        square_sum = tf.reduce_sum(tf.pow(embedding_matrix, 2), axis=1)
        bi_interaction = tf.subtract(sum_square, square_sum)
        add_layer_summary(bi_interaction.name, bi_interaction)

    deep_output = stack_dense_layer(bi_interaction,
                                    params['hidden_units'],
                                    dropout_rate=params['dropout_rate'],
                                    batch_norm=params['batch_norm'],
                                    mode=mode,
                                    add_summary=True)

    with tf.variable_scope('output'):
        y = linear_output + deep_output
        add_layer_summary('output', y)

    return y
Example #2
0
def model_fn_dense(features, labels, mode, params):
    """xDeepFM model_fn for dense feature columns: linear + DNN + CIN towers,
    concatenated and projected to a single logit.

    Args:
        features: dict of input tensors keyed by feature name.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'hidden_units', 'dropout_rate', 'batch_norm',
            'cin_layer_size'.

    Returns:
        Logit tensor of shape (batch, 1).
    """
    dense_feature, sparse_feature = build_features()
    deep_in = tf.feature_column.input_layer(features, dense_feature)
    wide_in = tf.feature_column.input_layer(features, sparse_feature)

    # Linear part
    with tf.variable_scope('Linear_component'):
        linear_output = tf.layers.dense(wide_in, units=1)
        add_layer_summary('linear_output', linear_output)

    # Deep part
    deep_out = stack_dense_layer(deep_in,
                                 params['hidden_units'],
                                 params['dropout_rate'],
                                 params['batch_norm'],
                                 mode,
                                 add_summary=True)

    # CIN part: rebuild the (batch, field_size, emb_size) view of the input.
    emb_dim = dense_feature[0].variable_shape.as_list()[-1]
    num_fields = len(dense_feature)
    embedding_matrix = tf.reshape(deep_in, [-1, num_fields, emb_dim])
    add_layer_summary('embedding_matrix', embedding_matrix)

    cin_out = cin_layer(embedding_matrix, params['cin_layer_size'],
                        emb_dim, num_fields)

    # Concatenate the three towers and project to one logit.
    with tf.variable_scope('output'):
        stacked = tf.concat([deep_out, cin_out, linear_output], axis=1)
        y = tf.layers.dense(stacked, units=1)
        add_layer_summary('output', y)

    return y
Example #3
0
def model_fn_sparse(features, labels, mode, params):
    """xDeepFM model_fn for libsvm-style sparse input: linear + DNN + CIN
    towers, concatenated and projected to a single logit.

    Args:
        features: dict with 'feat_ids' and 'feat_vals' tensors.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'data_params' (field_size, feature_size,
            embedding_size), 'hidden_units', 'dropout_rate', 'batch_norm',
            'cin_layer_size'.

    Returns:
        Logit tensor of shape (batch, 1).
    """
    # hyper parameters
    data_params = params['data_params']
    field_size = data_params['field_size']
    feature_size = data_params['feature_size']
    embedding_size = data_params['embedding_size']

    # reshape flat id / value tensors to (batch, field_size)
    feat_ids = tf.reshape(features['feat_ids'], shape=[-1, field_size])
    feat_vals = tf.reshape(features['feat_vals'], shape=[-1, field_size])

    with tf.variable_scope('extract_embedding'):
        # (batch, field_size, embedding_size)
        embedding_matrix = sparse_embedding(feature_size, embedding_size,
                                            field_size, feat_ids, feat_vals,
                                            add_summary=True)
        # flatten for the DNN: (batch, field_size * embedding_size)
        dense_input = tf.reshape(embedding_matrix,
                                 [-1, field_size * embedding_size])

    # first-order linear term
    linear_output = sparse_linear(feature_size, feat_ids, feat_vals,
                                  add_summary=True)

    # deep tower
    dense_output = stack_dense_layer(dense_input, params['hidden_units'],
                                     params['dropout_rate'],
                                     params['batch_norm'], mode,
                                     add_summary=True)

    # CIN tower
    cin_output = cin_layer(embedding_matrix, params['cin_layer_size'],
                           embedding_size, field_size)

    # concatenate the three towers and project to one logit
    with tf.variable_scope('output'):
        y = tf.layers.dense(
            tf.concat([dense_output, cin_output, linear_output], axis=1),
            units=1)
        add_layer_summary('output', y)

    return y
Example #4
0
def model_fn_dense(features, labels, mode, params):
    """FiBiNET model_fn for dense feature columns: a linear term plus a DNN
    over bilinear interactions of the raw and SENET-reweighted embeddings.

    Args:
        features: dict of input tensors keyed by feature name.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'pool_op', 'senet_ratio', 'model_type',
            'hidden_units', 'dropout_rate', 'batch_norm'.

    Returns:
        Logit tensor combining the deep and linear outputs.
    """
    dense_feature, sparse_feature = build_features()
    dense_input = tf.feature_column.input_layer(features, dense_feature)
    sparse_input = tf.feature_column.input_layer(features, sparse_feature)

    # Linear part
    with tf.variable_scope('Linear_component'):
        linear_output = tf.layers.dense(sparse_input, units=1)
        add_layer_summary('linear_output', linear_output)

    # Restore the (batch, field_size, emb_size) layout from the flat input.
    num_fields = len(dense_feature)
    emb_dim = dense_feature[0].variable_shape.as_list()[-1]
    embedding_matrix = tf.reshape(dense_input, [-1, num_fields, emb_dim])

    # SENET layer re-weights each field's embedding.
    senet_embedding_matrix = SENET_layer(embedding_matrix, num_fields,
                                         emb_dim,
                                         pool_op=params['pool_op'],
                                         ratio=params['senet_ratio'])

    # Bilinear interaction on both the raw and SENET embeddings, combined.
    bi_org = Bilinear_layer(embedding_matrix, num_fields, emb_dim,
                            type=params['model_type'], name='org')
    bi_senet = Bilinear_layer(senet_embedding_matrix, num_fields, emb_dim,
                              type=params['model_type'], name='senet')

    combination_layer = tf.concat([bi_org, bi_senet], axis=1)

    # Deep part
    dense_output = stack_dense_layer(combination_layer,
                                     params['hidden_units'],
                                     params['dropout_rate'],
                                     params['batch_norm'], mode,
                                     add_summary=True)

    with tf.variable_scope('output'):
        y = dense_output + linear_output
        add_layer_summary('output', y)

    return y
Example #5
0
def model_fn_sparse(features, labels, mode, params):
    """NFM model_fn for libsvm-style sparse input: a first-order linear term
    plus an MLP over bi-interaction pooling of the field embeddings.

    Args:
        features: dict with 'feat_ids' and 'feat_vals' tensors.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'data_params' (field_size, feature_size,
            embedding_size), 'hidden_units', 'dropout_rate', 'batch_norm'.

    Returns:
        Logit tensor combining the linear and deep outputs.
    """
    # hyper parameters
    hp = params['data_params']
    field_size = hp['field_size']
    feature_size = hp['feature_size']
    embedding_size = hp['embedding_size']

    # reshape flat id / value tensors to (batch, field_size)
    feat_ids = tf.reshape(features['feat_ids'], shape=[-1, field_size])
    feat_vals = tf.reshape(features['feat_vals'], shape=[-1, field_size])

    # embedding lookup for every field
    embedding_matrix = sparse_embedding(feature_size, embedding_size,
                                        field_size, feat_ids, feat_vals,
                                        add_summary=True)

    # first-order linear term
    linear_output = sparse_linear(feature_size, feat_ids, feat_vals,
                                  add_summary=True)

    with tf.variable_scope('BI_Pooling'):
        # (sum e)^2 - sum(e^2): twice the sum of pairwise field interactions.
        sum_square = tf.pow(tf.reduce_sum(embedding_matrix, axis=1), 2)
        square_sum = tf.reduce_sum(tf.pow(embedding_matrix, 2), axis=1)
        dense = tf.subtract(sum_square, square_sum)
        add_layer_summary(dense.name, dense)

    # fully connected stack on top of the pooled interactions
    dense = stack_dense_layer(dense, params['hidden_units'],
                              dropout_rate=params['dropout_rate'],
                              batch_norm=params['batch_norm'],
                              mode=mode,
                              add_summary=True)

    with tf.variable_scope('output'):
        y = linear_output + dense
        add_layer_summary('output', y)

    return y
Example #6
0
def model_fn_sparse(features, labels, mode, params):
    """Deep & Cross model_fn for libsvm-style sparse input: a DNN tower and a
    cross-layer tower share the flattened embeddings, then are stacked and
    projected to a single logit.

    Args:
        features: dict with 'feat_ids' and 'feat_vals' tensors.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'data_params' (field_size, feature_size,
            embedding_size), 'hidden_units', 'dropout_rate', 'batch_norm',
            'cross_layers'.

    Returns:
        Logit tensor of shape (batch, 1).
    """
    # hyper parameters
    data_params = params['data_params']
    field_size = data_params['field_size']
    feature_size = data_params['feature_size']
    embedding_size = data_params['embedding_size']

    # reshape flat id / value tensors to (batch, field_size)
    feat_ids = tf.reshape(features['feat_ids'], shape=[-1, field_size])
    feat_vals = tf.reshape(features['feat_vals'], shape=[-1, field_size])

    with tf.variable_scope('extract_embedding'):
        # (batch, field_size, embedding_size)
        embedding_matrix = sparse_embedding(feature_size, embedding_size,
                                            field_size, feat_ids, feat_vals,
                                            add_summary=True)
        # flatten for both towers: (batch, field_size * embedding_size)
        dense_input = tf.reshape(embedding_matrix,
                                 [-1, field_size * embedding_size])

    # deep tower
    deep_out = stack_dense_layer(dense_input, params['hidden_units'],
                                 params['dropout_rate'],
                                 params['batch_norm'], mode,
                                 add_summary=True)

    # cross tower
    cross_out = cross_layer(dense_input, params['cross_layers'])

    with tf.variable_scope('stack'):
        x_stack = tf.concat([deep_out, cross_out], axis=1)

    with tf.variable_scope('output'):
        y = tf.layers.dense(x_stack, units=1)
        add_layer_summary('output', y)

    return y
Example #7
0
def model_fn_varlen(features, labels, mode, params):
    """Model_fn for variable-length history features: builds dense, item and
    category embeddings, applies sequence pooling and target attention, then
    feeds the concatenated features into a main MoE network plus a bias
    network and sums their logits.

    Args:
        features: dict of input tensors; reads 'item', 'item_cate',
            'hist_item_list' and 'hist_cate_list'.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the sub-layers.
        params: dict; reads 'item_count', 'cate_count', 'emb_dim',
            'input_features', 'hidden_units', 'dropout_rate', 'batch_norm'.

    Returns:
        main_y + bias_y, the combined logit tensor of shape [batch_size, 1].
    """
    # ---general embedding layer---
    emb_dict = {}
    f_dense = build_features(params)
    f_dense = tf.compat.v1.feature_column.input_layer(features, f_dense) # user embedding representation [batch_size, f_num*emb_dim]
    emb_dict['dense_emb'] = f_dense

    # Embedding Look up: history item list and category list
    item_embedding = tf.compat.v1.get_variable(shape = [params['item_count'], params['emb_dim']],
                                     initializer = tf.truncated_normal_initializer(),
                                     name = 'item_embedding')
    cate_embedding = tf.compat.v1.get_variable(shape = [params['cate_count'], params['emb_dim']],
                                     initializer = tf.truncated_normal_initializer(),
                                     name = 'cate_embedding')
    item_emb = tf.nn.embedding_lookup( item_embedding, features['item'] )  # [batch_size, emb_dim]
    emb_dict['item_emb'] = item_emb
    item_hist_emb = tf.nn.embedding_lookup( item_embedding, features['hist_item_list'] )  # [batch_size, padded_size, emb_dim]
    emb_dict['item_hist_emb'] = item_hist_emb
    cate_emb = tf.nn.embedding_lookup( cate_embedding, features['item_cate'] )  # [batch_size, emb_dim]
    emb_dict['cate_emb'] = cate_emb
    cate_hist_emb = tf.nn.embedding_lookup( cate_embedding, features['hist_cate_list'] )  # [batch_size, padded_size, emb_dim]
    emb_dict['cate_hist_emb'] = cate_hist_emb

    # ---sequence embedding layer---
    # NOTE(review): return values are unused, so these helpers presumably add
    # pooled / attended sequence embeddings into emb_dict in place — confirm
    # against their definitions.
    seq_pooling_layer(features, params, emb_dict, mode)
    target_attention_layer(features, params, emb_dict)

    # Concat the embeddings named in params['input_features'], in that order.
    concat_features = []
    for f in params['input_features']:
        concat_features.append(emb_dict[f])
    fc = tf.concat(concat_features, axis=1)

    # ---dnn layer---
    main_net = moe_layer(fc, params, mode, scope='main_dense_moe')
    bias_net = stack_dense_layer(fc, params['hidden_units'], params['dropout_rate'], params['batch_norm'],
                              mode, scope='bias_dense')

    # ---logits layer---
    main_y = tf.layers.dense(main_net, units=1, name='main_logit_net')
    bias_y = tf.layers.dense(bias_net, units=1, name='bias_logit_net')

    return main_y+bias_y
Example #8
0
def model_fn_dense(features, labels, mode, params):
    """Deep & Cross model_fn for dense feature columns: a DNN tower and a
    cross-layer tower share the same input, then are stacked and projected
    to a single logit.

    Args:
        features: dict of input tensors keyed by feature name.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'hidden_units', 'dropout_rate', 'batch_norm',
            'cross_layers', 'cross_op'.

    Returns:
        Logit tensor of shape (batch, 1).
    """
    dense_feature = build_features()
    dense_input = tf.feature_column.input_layer(features, dense_feature)

    # deep tower
    deep_out = stack_dense_layer(dense_input, params['hidden_units'],
                                 params['dropout_rate'],
                                 params['batch_norm'], mode,
                                 add_summary=True)

    # cross tower
    cross_out = cross_layer(dense_input, params['cross_layers'],
                            params['cross_op'])

    with tf.variable_scope('stack'):
        x_stack = tf.concat([deep_out, cross_out], axis=1)

    with tf.variable_scope('output'):
        y = tf.layers.dense(x_stack, units=1)
        add_layer_summary('output', y)

    return y
Example #9
0
def model_fn_sparse(features, labels, mode, params):
    """FiBiNET model_fn for libsvm-style sparse input: a first-order linear
    term plus a DNN over bilinear interactions of the raw and
    SENET-reweighted embeddings.

    Args:
        features: dict with 'feat_ids' and 'feat_vals' tensors.
        labels: unused here; kept for the Estimator model_fn signature.
        mode: tf.estimator.ModeKeys value, forwarded to the dense stack.
        params: dict with 'data_params' (field_size, feature_size,
            embedding_size), 'pool_op', 'senet_ratio', 'model_type',
            'hidden_units', 'dropout_rate', 'batch_norm'.

    Returns:
        Logit tensor combining the deep and linear outputs.
    """
    # hyper parameters
    data_params = params['data_params']
    field_size = data_params['field_size']
    feature_size = data_params['feature_size']
    embedding_size = data_params['embedding_size']

    # reshape flat id / value tensors to (batch, field_size)
    feat_ids = tf.reshape(features['feat_ids'], shape=[-1, field_size])
    feat_vals = tf.reshape(features['feat_vals'], shape=[-1, field_size])

    with tf.variable_scope('extract_embedding'):
        # (batch, field_size, embedding_size)
        embedding_matrix = sparse_embedding(feature_size, embedding_size,
                                            field_size, feat_ids, feat_vals,
                                            add_summary=True)

    # first-order linear term
    linear_output = sparse_linear(feature_size, feat_ids, feat_vals,
                                  add_summary=True)

    # SENET layer re-weights each field's embedding.
    senet_embedding_matrix = SENET_layer(embedding_matrix, field_size,
                                         embedding_size,
                                         pool_op=params['pool_op'],
                                         ratio=params['senet_ratio'])

    # Bilinear interaction on both the raw and SENET embeddings, combined.
    bi_org = Bilinear_layer(embedding_matrix, field_size, embedding_size,
                            type=params['model_type'], name='org')
    bi_senet = Bilinear_layer(senet_embedding_matrix, field_size,
                              embedding_size, type=params['model_type'],
                              name='senet')

    combination_layer = tf.concat([bi_org, bi_senet], axis=1)

    # deep part
    dense_output = stack_dense_layer(combination_layer,
                                     params['hidden_units'],
                                     params['dropout_rate'],
                                     params['batch_norm'], mode,
                                     add_summary=True)

    with tf.variable_scope('output'):
        y = dense_output + linear_output
        add_layer_summary('output', y)

    return y