def input_from_feature_columns(features, feature_columns, l2_reg, init_std, seed, prefix='', seq_mask_zero=True,
                               support_dense=True, support_group=False, embedding_matrix_dict=None):
    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if feature_columns else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if feature_columns else []

    if embedding_matrix_dict is None:
        embedding_matrix_dict = create_embedding_matrix(
            feature_columns, l2_reg, init_std, seed, prefix=prefix, seq_mask_zero=seq_mask_zero)

    group_sparse_embedding_dict = embedding_lookup(embedding_matrix_dict, features, sparse_feature_columns)
    dense_value_list = get_dense_input(features, feature_columns)
    if not support_dense and len(dense_value_list) > 0:
        raise ValueError("DenseFeat is not supported in dnn_feature_columns")

    sequence_embed_dict = varlen_embedding_lookup(
        embedding_matrix_dict, features, varlen_sparse_feature_columns)
    group_varlen_sparse_embedding_dict = get_varlen_pooling_list(
        sequence_embed_dict, features, varlen_sparse_feature_columns)
    group_embedding_dict = mergeDict(group_sparse_embedding_dict, group_varlen_sparse_embedding_dict)
    if not support_group:
        group_embedding_dict = list(
            chain.from_iterable(group_embedding_dict.values()))
    return group_embedding_dict, dense_value_list
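

# Hedged usage sketch (not part of the original code): one way input_from_feature_columns
# might be driven. Feature names, vocabulary sizes and embedding dims are illustrative
# assumptions, and build_input_features / SparseFeat / DenseFeat are assumed to be the
# DeepCTR-style helpers already imported by this module.
def _example_input_from_feature_columns():
    feature_columns = [
        SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
        SparseFeat('item_id', vocabulary_size=500, embedding_dim=8),
        DenseFeat('pay_score', 1),
    ]
    # OrderedDict of Keras Input layers keyed by feature name.
    features = build_input_features(feature_columns)
    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, feature_columns, l2_reg=1e-6, init_std=0.0001, seed=1024)
    return sparse_embedding_list, dense_value_list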
def KDD_DIN(dnn_feature_columns, history_feature_list, dnn_use_bn=False, dnn_hidden_units=(200, 80),
            dnn_activation='relu', att_hidden_size=(80, 40), att_activation="dice",
            att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0,
            init_std=0.0001, seed=1024, task='binary'):
    """Instantiates the Deep Interest Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param history_feature_list: list, to indicate the sequence sparse fields.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net.
    :param dnn_activation: Activation function to use in the deep net.
    :param att_hidden_size: list of positive integers, the layer number and units in each layer of the attention net.
    :param att_activation: Activation function to use in the attention net.
    :param att_weight_normalization: bool. Whether to normalize the attention score of the local activation unit.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability of dropping out a given DNN coordinate.
    :param init_std: float, the initialization std of the embedding vectors.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """
    features = build_input_features(dnn_feature_columns)

    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat), dnn_feature_columns)) if dnn_feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []

    # Split variable-length features into behaviour history ("hist_" prefix) and the rest.
    history_feature_columns = []
    sparse_varlen_feature_columns = []
    history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if feature_name in history_fc_names:
            history_feature_columns.append(fc)
        else:
            sparse_varlen_feature_columns.append(fc)

    inputs_list = list(features.values())

    embedding_dict = kdd_create_embedding_matrix(dnn_feature_columns, l2_reg_embedding, init_std, seed, prefix="")

    query_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns,
                                      history_feature_list, history_feature_list, to_list=True)
    keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns,
                                     history_fc_names, history_fc_names, to_list=True)
    dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns,
                                          mask_feat_list=history_feature_list, to_list=True)
    dense_value_list = get_dense_input(features, dense_feature_columns)

    sequence_embed_dict = varlen_embedding_lookup(
        embedding_dict, features, sparse_varlen_feature_columns)
    sequence_embed_list = get_varlen_pooling_list(
        sequence_embed_dict, features, sparse_varlen_feature_columns, to_list=True)
    dnn_input_emb_list += sequence_embed_list

    keys_emb = concat_func(keys_emb_list, mask=True)
    deep_input_emb = concat_func(dnn_input_emb_list)
    query_emb = concat_func(query_emb_list, mask=True)

    # Attention pooling of the behaviour sequence, queried by the candidate item embedding.
    hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,
                                         weight_normalization=att_weight_normalization,
                                         supports_masking=True)([query_emb, keys_emb])

    deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
    deep_input_emb = Flatten()(deep_input_emb)
    dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)

    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input)
    final_logit = Dense(1, use_bias=False)(output)

    output = PredictionLayer(task)(final_logit)

    model = Model(inputs=inputs_list, outputs=output)
    return model
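

# Hedged usage sketch (illustrative only, not from the original repo): building a KDD_DIN
# model. The behaviour feature is assumed to follow the "hist_" + name convention used
# above, and embedding_name sharing follows the standard DeepCTR DIN convention; vocab
# sizes and maxlen are made up.
def _example_kdd_din():
    feature_columns = [
        SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
        SparseFeat('item_id', vocabulary_size=500, embedding_dim=8),
        DenseFeat('pay_score', 1),
        VarLenSparseFeat(SparseFeat('hist_item_id', vocabulary_size=500,
                                    embedding_dim=8, embedding_name='item_id'),
                         maxlen=10),
    ]
    model = KDD_DIN(feature_columns, history_feature_list=['item_id'])
    model.compile('adam', 'binary_crossentropy')
    return model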
def MIND(user_feature_columns, item_feature_columns, num_sampled=5, k_max=2, p=1.0, dynamic_k=False,
         user_dnn_hidden_units=(64, 32), dnn_activation='relu', dnn_use_bn=False, l2_reg_dnn=0,
         l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024):
    """Instantiates the MIND Model architecture.

    :param user_feature_columns: An iterable containing the user features used by the model.
    :param item_feature_columns: An iterable containing the item features used by the model.
    :param num_sampled: int, the number of classes to randomly sample per batch.
    :param k_max: int, the max size of the user interest embedding.
    :param p: float, the parameter for adjusting the attention distribution in LabelAwareAttention.
    :param dynamic_k: bool, whether or not to use a dynamic interest number.
    :param user_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the user tower.
    :param dnn_activation: Activation function to use in the deep net.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param l2_reg_dnn: L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability of dropping out a given DNN coordinate.
    :param init_std: float, the initialization std of the embedding vectors.
    :param seed: integer, to use as random seed.
    :return: A Keras model instance.
    """
    if len(item_feature_columns) > 1:
        raise ValueError("Now MIND only supports 1 item feature like item_id")
    item_feature_column = item_feature_columns[0]
    item_feature_name = item_feature_column.name

    history_feature_list = [item_feature_name]

    features = build_input_features(user_feature_columns)
    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat), user_feature_columns)) if user_feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat), user_feature_columns)) if user_feature_columns else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat), user_feature_columns)) if user_feature_columns else []

    # Split variable-length features into behaviour history ("hist_" prefix) and the rest.
    history_feature_columns = []
    sparse_varlen_feature_columns = []
    history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if feature_name in history_fc_names:
            history_feature_columns.append(fc)
        else:
            sparse_varlen_feature_columns.append(fc)

    inputs_list = list(features.values())

    embedding_dict = create_embedding_matrix(user_feature_columns + item_feature_columns,
                                             l2_reg_embedding, init_std, seed, prefix="")

    item_features = build_input_features(item_feature_columns)

    query_emb_list = embedding_lookup(embedding_dict, item_features, item_feature_columns,
                                      history_feature_list, history_feature_list, to_list=True)
    keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns,
                                     history_fc_names, history_fc_names, to_list=True)
    dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns,
                                          mask_feat_list=history_feature_list, to_list=True)
    dense_value_list = get_dense_input(features, dense_feature_columns)

    sequence_embed_dict = varlen_embedding_lookup(
        embedding_dict, features, sparse_varlen_feature_columns)
    sequence_embed_list = get_varlen_pooling_list(
        sequence_embed_dict, features, sparse_varlen_feature_columns, to_list=True)
    dnn_input_emb_list += sequence_embed_list

    # keys_emb = concat_func(keys_emb_list, mask=True)
    # query_emb = concat_func(query_emb_list, mask=True)
    history_emb = PoolingLayer()(NoMask()(keys_emb_list))
    target_emb = PoolingLayer()(NoMask()(query_emb_list))

    target_emb_size = target_emb.get_shape()[-1].value
    max_len = history_emb.get_shape()[1].value
    hist_len = features['hist_len']

    # Multi-interest extraction: dynamic routing over the behaviour sequence.
    high_capsule = CapsuleLayer(input_units=target_emb_size, out_units=target_emb_size,
                                max_len=max_len, k_max=k_max)((history_emb, hist_len))

    if len(dnn_input_emb_list) > 0 or len(dense_value_list) > 0:
        user_other_feature = combined_dnn_input(dnn_input_emb_list, dense_value_list)
        other_feature_tile = tf.keras.layers.Lambda(
            tile_user_otherfeat, arguments={'k_max': k_max})(user_other_feature)
        user_deep_input = Concatenate()([NoMask()(other_feature_tile), high_capsule])
    else:
        user_deep_input = high_capsule
    # user_deep_input._uses_learning_phase = True  # attention_score._uses_learning_phase

    user_embeddings = DNN(user_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                          dnn_use_bn, seed, name="user_embedding")(user_deep_input)

    item_inputs_list = list(item_features.values())
    item_embedding = embedding_dict[item_feature_name]

    if dynamic_k:
        user_embedding_final = LabelAwareAttention(k_max=k_max, pow_p=p)(
            (user_embeddings, target_emb, hist_len))
    else:
        user_embedding_final = LabelAwareAttention(k_max=k_max, pow_p=p)(
            (user_embeddings, target_emb))

    output = SampledSoftmaxLayer(item_embedding, num_sampled=num_sampled)(
        inputs=(user_embedding_final, item_features[item_feature_name]))
    model = Model(inputs=inputs_list + item_inputs_list, outputs=output)

    model.__setattr__("user_input", inputs_list)
    model.__setattr__("user_embedding", user_embeddings)
    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("item_embedding",
                      get_item_embedding(item_embedding, item_features[item_feature_name]))

    return model
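

# Hedged usage sketch (illustrative assumptions throughout): wiring up MIND. The model
# reads features['hist_len'] directly, so the user feature columns must yield an input
# with exactly that name; a DenseFeat is used for it here, which may need adapting (e.g.
# dtype or the repo's own length convention). Vocab sizes, dims and maxlen are made up.
def _example_mind():
    user_feature_columns = [
        SparseFeat('user_id', vocabulary_size=1000, embedding_dim=16),
        VarLenSparseFeat(SparseFeat('hist_item_id', vocabulary_size=500,
                                    embedding_dim=16, embedding_name='item_id'),
                         maxlen=20),
        DenseFeat('hist_len', 1, dtype='int64'),  # behaviour-sequence length input
    ]
    item_feature_columns = [SparseFeat('item_id', vocabulary_size=500, embedding_dim=16)]
    model = MIND(user_feature_columns, item_feature_columns, num_sampled=5, k_max=2)
    # SampledSoftmaxLayer already outputs a per-example loss, so the Keras loss just
    # passes it through (one common convention; adjust to the repo's training code).
    model.compile('adam', loss=lambda y_true, y_pred: y_pred)
    return model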
def xDeepFM_MTL(linear_feature_columns, dnn_feature_columns, gate_feature_columns, embedding_size=8,
                dnn_hidden_units=(256, 256), cin_layer_size=(256, 256,), cin_split_half=True,
                init_std=0.0001, l2_reg_dnn=0, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False,
                task_net_size=(128,), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, seed=1024):
    # check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')

    features = build_input_features(linear_feature_columns + dnn_feature_columns + gate_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding, init_std, seed)
    # The gate is a single dense feature used below to mix the two task towers.
    gate = get_dense_input(features, gate_feature_columns)[0]

    linear_logit = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear,
                                    init_std=init_std, seed=seed, prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)

    # Note: exFM_logit is only defined when cin_layer_size is non-empty and is used
    # unconditionally further down, so an empty cin_layer_size is not supported here.
    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, 0, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(1, activation=None)(exFM_out)

    # dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_input = tf.keras.layers.Flatten()(fm_input)
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input)

    finish_out1 = DNN(task_net_size)(deep_out)
    finish_logit1 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out1)
    like_out1 = DNN(task_net_size)(deep_out)
    like_logit1 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out1)

    finish_out2 = DNN(task_net_size)(deep_out)
    finish_logit2 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out2)
    like_out2 = DNN(task_net_size)(deep_out)
    like_logit2 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out2)

    # condition = tf.placeholder("float32", shape=[None, 1], name="condition")

    # Mix the two towers per task with the gate value.
    finish_logit = gate * finish_logit1 + (1.0 - gate) * finish_logit2
    like_logit = gate * like_logit1 + (1.0 - gate) * like_logit2
    print(np.shape(like_logit))  # debug: inspect the mixed logit shape

    finish_logit = tf.keras.layers.add([linear_logit, finish_logit, exFM_logit])
    like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=[output_finish, output_like])
    return model
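

# Hedged usage sketch (illustrative only): a two-task finish/like setup for xDeepFM_MTL.
# The gate feature is assumed to be a single dense 0/1 column that switches between the
# two task towers, matching how `gate` is used above. Note this model calls an older
# input_from_feature_columns/get_linear_logit API, so the feature-column constructors
# here (borrowed from the rest of this file) may need adapting to the pinned DeepCTR
# version; names and sizes are made up.
def _example_xdeepfm_mtl():
    linear_feature_columns = [SparseFeat('uid', vocabulary_size=1000, embedding_dim=8),
                              SparseFeat('item_id', vocabulary_size=500, embedding_dim=8)]
    dnn_feature_columns = linear_feature_columns
    gate_feature_columns = [DenseFeat('is_new_user', 1)]
    model = xDeepFM_MTL(linear_feature_columns, dnn_feature_columns, gate_feature_columns)
    model.compile('adagrad',
                  loss={'finish': 'binary_crossentropy', 'like': 'binary_crossentropy'},
                  loss_weights=[1.0, 1.0])
    return model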
def _model_fn(features, labels, mode, config):
    # Note: this model_fn closes over dnn_feature_columns, history_feature_list,
    # dnn_hidden_units, att_hidden_size, att_activation, att_weight_normalization,
    # l2_reg_embedding, l2_reg_dnn, dnn_dropout, dnn_use_bn, dnn_activation, seed, task,
    # linear_optimizer, dnn_optimizer and training_chief_hooks from its enclosing scope.
    train_flag = (mode == tf.estimator.ModeKeys.TRAIN)

    with variable_scope(DNN_SCOPE_NAME):
        # Translate tf.feature_column definitions into DeepCTR-style feature columns.
        sparse_feature_columns = []
        dense_feature_columns = []
        varlen_sparse_feature_columns = []
        for feat in dnn_feature_columns:
            new_feat_name = list(feat.parse_example_spec.keys())[0]
            if new_feat_name in ['hist_price_id', 'hist_des_id']:
                varlen_sparse_feature_columns.append(
                    VarLenSparseFeat(SparseFeat(new_feat_name, vocabulary_size=100,
                                                embedding_dim=32, use_hash=False),
                                     maxlen=3))
            elif is_embedding(feat):
                sparse_feature_columns.append(
                    SparseFeat(new_feat_name, vocabulary_size=feat[0]._num_buckets + 1,
                               embedding_dim=feat.dimension))
            else:
                dense_feature_columns.append(DenseFeat(new_feat_name))

        # Split variable-length features into behaviour history ("hist_" prefix) and the rest.
        history_feature_columns = []
        sparse_varlen_feature_columns = []
        history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
        for fc in varlen_sparse_feature_columns:
            feature_name = fc.name
            if feature_name in history_fc_names:
                history_feature_columns.append(fc)
            else:
                sparse_varlen_feature_columns.append(fc)

        my_feature_columns = sparse_feature_columns + dense_feature_columns + varlen_sparse_feature_columns

        embedding_dict = create_embedding_matrix(my_feature_columns, l2_reg_embedding, seed, prefix="")

        query_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns,
                                          history_feature_list, history_feature_list, to_list=True)
        # debug output
        print('query_emb_list', query_emb_list)
        print('embedding_dict', embedding_dict)
        print('history_feature_columns', history_feature_columns)

        keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns,
                                         history_fc_names, history_fc_names, to_list=True)
        print('keys_emb_list', keys_emb_list)
        dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns,
                                              mask_feat_list=history_feature_list, to_list=True)
        print('dnn_input_emb_list', dnn_input_emb_list)
        dense_value_list = get_dense_input(features, dense_feature_columns)

        sequence_embed_dict = varlen_embedding_lookup(
            embedding_dict, features, sparse_varlen_feature_columns)
        sequence_embed_list = get_varlen_pooling_list(
            sequence_embed_dict, features, sparse_varlen_feature_columns, to_list=True)
        dnn_input_emb_list += sequence_embed_list

        keys_emb = concat_func(keys_emb_list, mask=True)
        deep_input_emb = concat_func(dnn_input_emb_list)
        query_emb = concat_func(query_emb_list, mask=True)

        hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,
                                             weight_normalization=att_weight_normalization,
                                             supports_masking=True)([query_emb, keys_emb])

        deep_input_emb = tf.keras.layers.Concatenate()([NoMask()(deep_input_emb), hist])
        deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)
        dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
        output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed=seed)(dnn_input)
        final_logit = tf.keras.layers.Dense(
            1, use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(output)

        # logits_list.append(final_logit)
        # logits = add_func(logits_list)
    # print(labels)
    # tf.summary.histogram(final_logit + '/final_logit', final_logit)
    return deepctr_model_fn(features, mode, final_logit, labels, task, linear_optimizer,
                            dnn_optimizer, training_chief_hooks=training_chief_hooks)
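

# Hedged sketch (hypothetical wiring, not from the original code): since _model_fn above
# refers to names such as dnn_feature_columns, history_feature_list, dnn_hidden_units,
# task, linear_optimizer, dnn_optimizer and training_chief_hooks, it presumably lives
# inside a DIN-style estimator factory in the source. A minimal factory shape could look
# like this, with _model_fn defined inside so it can close over the arguments.
def _example_din_estimator(dnn_feature_columns, history_feature_list, model_dir=None,
                           dnn_hidden_units=(200, 80), att_hidden_size=(80, 40),
                           att_activation='dice', att_weight_normalization=False,
                           dnn_activation='relu', dnn_use_bn=False, l2_reg_dnn=0,
                           l2_reg_embedding=1e-6, dnn_dropout=0, seed=1024, task='binary',
                           linear_optimizer='Ftrl', dnn_optimizer='Adagrad',
                           training_chief_hooks=None):
    # ... _model_fn (as defined above) would be nested here ...
    return tf.estimator.Estimator(_model_fn, model_dir=model_dir)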