def DeepFM(linear_feature_columns, dnn_feature_columns, embedding_size=8,
           use_fm=True, use_only_dnn=False, dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
           init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
           dnn_use_bn=False, task='binary'):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding_size
    :param use_fm: bool, whether to use the FM part
    :param use_only_dnn: bool, if True only the DNN logit is used as the final logit
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features, linear_feature_columns,
                                    l2_reg=l2_reg_linear, init_std=init_std,
                                    seed=seed, prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)
    fm_logit = FM()(fm_input)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  dnn_use_bn, seed)(dnn_input)
    dnn_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(dnn_out)

    if use_only_dnn:
        final_logit = dnn_logit
    elif len(dnn_hidden_units) == 0 and not use_fm:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) == 0 and use_fm:  # linear + FM
        final_logit = tf.keras.layers.add([linear_logit, fm_logit])
    elif len(dnn_hidden_units) > 0 and not use_fm:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, dnn_logit])
    elif len(dnn_hidden_units) > 0 and use_fm:  # linear + FM + Deep
        final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit])
    else:
        raise NotImplementedError

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
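# Usage sketch (editor's addition, not from the original repo): wiring the
# DeepFM above into a standard tf.keras train loop. Feature-column
# construction is DeepCTR-version-specific, so the caller is assumed to have
# already built `linear_feature_columns` / `dnn_feature_columns` and the
# model-input dict/list with this file's own helpers; only the plain Keras
# compile/fit calls below are asserted.
def _demo_deepfm_train(linear_feature_columns, dnn_feature_columns,
                       train_model_input, train_labels):
    model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(train_model_input, train_labels,
              batch_size=256, epochs=1, validation_split=0.1)
    return model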
def YoutubeDNN(user_feature_columns, item_feature_columns, num_sampled=5,
               user_dnn_hidden_units=(64, 16), dnn_activation='relu',
               dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-6,
               dnn_dropout=0, init_std=0.0001, seed=1024):
    """Instantiates the YoutubeDNN Model architecture.

    :param user_feature_columns: An iterable containing the user features used by the model.
    :param item_feature_columns: An iterable containing the item features used by the model.
    :param num_sampled: int, the number of classes to randomly sample per batch.
    :param user_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the user tower
    :param dnn_activation: activation function to use in the deep net
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :return: A Keras model instance.
    """
    if len(item_feature_columns) > 1:
        raise ValueError(
            "Currently, YoutubeDNN only supports a single item feature such as item_id")
    item_feature_name = item_feature_columns[0].name

    embedding_matrix_dict = create_embedding_matrix(
        user_feature_columns + item_feature_columns, l2_reg_embedding,
        init_std, seed, prefix="")

    user_features = build_input_features(user_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(
        user_features, user_feature_columns, l2_reg_embedding, init_std, seed,
        embedding_matrix_dict=embedding_matrix_dict)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list,
                                        user_dense_value_list)

    item_features = build_input_features(item_feature_columns)
    item_inputs_list = list(item_features.values())

    user_dnn_out = DNN(user_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed)(user_dnn_input)

    item_embedding = embedding_matrix_dict[item_feature_name]

    output = SampledSoftmaxLayer(item_embedding, num_sampled=num_sampled)(
        inputs=(user_dnn_out, item_features[item_feature_name]))
    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    model.__setattr__("user_input", user_inputs_list)
    model.__setattr__("user_embedding", user_dnn_out)
    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("item_embedding",
                      get_item_embedding(item_embedding,
                                         item_features[item_feature_name]))

    return model
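# Usage sketch (editor's addition): training the YoutubeDNN retrieval model
# and splitting out the two towers afterwards. SampledSoftmaxLayer already
# emits the per-example sampled-softmax loss, so the Keras loss just averages
# the model output (the usual DeepMatch convention); everything else here is
# plain tf.keras.
def _sampled_softmax_loss(y_true, y_pred):
    # y_pred is the sampled-softmax loss itself; average over the batch.
    return tf.reduce_mean(y_pred)


def _demo_youtubednn_train(user_feature_columns, item_feature_columns,
                           train_model_input, train_labels):
    model = YoutubeDNN(user_feature_columns, item_feature_columns, num_sampled=5)
    model.compile(optimizer='adam', loss=_sampled_softmax_loss)
    model.fit(train_model_input, train_labels, batch_size=256, epochs=1)
    # Separate towers for offline embedding export / ANN retrieval.
    user_embedding_model = Model(inputs=model.user_input,
                                 outputs=model.user_embedding)
    item_embedding_model = Model(inputs=model.item_input,
                                 outputs=model.item_embedding)
    return model, user_embedding_model, item_embedding_model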
def DeepFM(linear_feature_columns, dnn_feature_columns, embedding_size=8,
           use_fm=True, only_dnn=False, dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
           init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
           dnn_use_bn=False, task='binary'):
    """Instantiates a two-task (finish/like) DeepFM variant with MMoE towers.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding_size
    :param use_fm: bool, whether to use the FM part
    :param only_dnn: bool, unused in this multi-task variant; kept for signature compatibility
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    # Create an Input[1,] placeholder for each feature;
    # features ==> {'feature1': Input[1,], ...}
    features = build_input_features(linear_feature_columns + dnn_feature_columns)

    # [Input1, Input2, ...]
    inputs_list = list(features.values())

    # [embedding layer per sparse feature, each wired to that feature's
    # Input[1,] layer, ...], [Input[1,] layer per dense feature, ...]
    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)
    # Linear transformation layer, no activation function.
    linear_logit = get_linear_logit(features, linear_feature_columns,
                                    l2_reg=l2_reg_linear, init_std=init_std,
                                    seed=seed, prefix='linear')

    # Sparse embedding layers concatenated together.
    fm_input = concat_fun(sparse_embedding_list, axis=1)
    # Second-order FM term only; the first-order term and bias are covered
    # by the linear part.
    fm_logit = FM()(fm_input)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  dnn_use_bn, seed)(dnn_input)

    # Shared bottom feeds an MMoE block; one gated expert mixture per task.
    [finish_in, like_in] = MMoE(units=16, num_experts=8, num_tasks=2)(dnn_out)

    finish_out = Dense(128, dnn_activation,
                       kernel_regularizer=l2(l2_reg_dnn))(finish_in)
    finish_out = Dense(128, dnn_activation,
                       kernel_regularizer=l2(l2_reg_dnn))(finish_out)
    finish_logit = tf.keras.layers.Dense(1, use_bias=False,
                                         activation=None)(finish_out)

    like_out = Dense(128, dnn_activation,
                     kernel_regularizer=l2(l2_reg_dnn))(like_in)
    like_out = Dense(128, dnn_activation,
                     kernel_regularizer=l2(l2_reg_dnn))(like_out)
    # Optionally feed the finish tower into the like tower with a stopped
    # gradient (left disabled, as in the original):
    # finish_logit_stop_grad = Lambda(lambda x: stop_gradient(x))(finish_out)
    # like_out = concat_fun([like_out, finish_logit_stop_grad])
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    # Each task logit combines the shared linear and FM parts with its tower.
    finish_logit = tf.keras.layers.add([linear_logit, fm_logit, finish_logit])
    like_logit = tf.keras.layers.add([linear_logit, fm_logit, like_logit])

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
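# Usage sketch (editor's addition): compiling the two-headed finish/like
# model above. The per-task loss weights are illustrative assumptions, not
# values from the original code.
def _demo_deepfm_mmoe_train(linear_feature_columns, dnn_feature_columns,
                            train_model_input, finish_labels, like_labels):
    model = DeepFM(linear_feature_columns, dnn_feature_columns)
    model.compile(optimizer='adam',
                  loss=['binary_crossentropy', 'binary_crossentropy'],
                  loss_weights=[0.7, 0.3])  # assumed task weighting
    model.fit(train_model_input, [finish_labels, like_labels],
              batch_size=4096, epochs=1)
    return model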
def MT_xDeepFM(linear_feature_columns, dnn_feature_columns, embedding_size=8,
               dnn_hidden_units=(256, 256), cin_layer_size=(128, 128,),
               cin_split_half=True, cin_activation='relu',
               l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
               l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0,
               dnn_activation='relu', dnn_use_bn=False, task='binary'):
    """Instantiates a multi-task xDeepFM architecture whose finish/like heads
    are selected per example by the ``u_region_id`` feature.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding_size
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net
    :param cin_layer_size: list of positive integers or empty list, the feature maps in each hidden layer of the Compressed Interaction Network
    :param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit
    :param cin_activation: activation function used on feature maps
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: L2 regularizer strength applied to embedding vectors
    :param l2_reg_dnn: L2 regularizer strength applied to the deep net
    :param l2_reg_cin: L2 regularizer strength applied to the CIN
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
""" features = build_input_features(linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) sparse_embedding_list, dense_value_list = input_from_feature_columns(features,dnn_feature_columns, embedding_size, l2_reg_embedding,init_std, seed) linear_logit = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std, seed=seed, prefix='linear') fm_input = concat_fun(sparse_embedding_list, axis=1) if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half, l2_reg_cin, seed)(fm_input) exFM_logit = tf.keras.layers.Dense(4, activation=None, )(exFM_out) dnn_input = combined_dnn_input(sparse_embedding_list,dense_value_list) deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input) deep_logit = tf.keras.layers.Dense( 4, use_bias=False, activation=None)(deep_out) if len(dnn_hidden_units) == 0 and len(cin_layer_size) == 0: # only linear final_logit = linear_logit elif len(dnn_hidden_units) == 0 and len(cin_layer_size) > 0: # linear + CIN final_logit = tf.keras.layers.add([linear_logit, exFM_logit]) elif len(dnn_hidden_units) > 0 and len(cin_layer_size) == 0: # linear + Deep final_logit = tf.keras.layers.add([linear_logit, deep_logit]) elif len(dnn_hidden_units) > 0 and len(cin_layer_size) > 0: # linear + CIN + Deep final_logit = tf.keras.layers.add( [linear_logit, deep_logit, exFM_logit]) else: raise NotImplementedError output_units = PredictionLayer(task)(final_logit) # output = None # for i in range(len(flag_columns)): # print(i) # selected_index = [0, 1] if flag_columns[i] else [2, 3] # if output != None: # output = tf.concat([output, tf.reshape(tf.gather(output_units[i, :], selected_index), (1, -1))], axis=0) # else: # output = tf.reshape(tf.gather(output_units[i, :], selected_index), (1, -1)) finish = tf.cast(1-features['u_region_id'], dtype=tf.float32)*output_units[:,0]+\ tf.cast(features['u_region_id'], dtype=tf.float32)*output_units[:,1] like = tf.cast(1-features['u_region_id'], dtype=tf.float32)*output_units[:,2]+\ tf.cast(1-features['u_region_id'], dtype=tf.float32)*output_units[:,3] # mask = tf.cond(pred=tf.equal(features['u_region_id'], tf.constant(value = 1, dtype = tf.int32)), # true_fn=lambda: [True, True, False, False], false_fn=lambda: [False, False, True, True]) # output = tf.reshape(tf.boolean_mask(output_units, mask), shape=[-1, 2]) # finish = output[:, 0] # like = output[:, 1] # print(output) model = tf.keras.models.Model(inputs=inputs_list, outputs=[finish, like]) return model
# Script-level wiring: assemble the shared-bottom + MMoE graph directly.
train_model_input = [train[name] for name in feature_names]
test_model_input = [test[name] for name in feature_names]

features = build_input_features(linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())

sparse_embedding_list, dense_value_list = input_from_feature_columns(
    features, dnn_feature_columns, embedding_size=8, l2_reg=0.00001,
    init_std=0.0001, seed=1024)

dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

# MMoE: a shared bottleneck feeding one gated expert mixture per task.
mmoe_input = tf.keras.layers.Dense(units=128, activation='relu')(dnn_input)
mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=2)(mmoe_input)

output_layers = []

# Build a tower per task from the MMoE outputs. The loop body was truncated
# in the source; it is restored here following the same pattern as the
# DeepFMmmoe function further down.
output_info = ['finish', 'like']
for index, task_layer in enumerate(mmoe_layers):
    tower_layer = tf.keras.layers.Dense(units=128,
                                        activation='relu')(task_layer)
    output_layer = tf.keras.layers.Dense(units=1, activation='sigmoid',
                                         name=output_info[index])(tower_layer)
    output_layers.append(output_layer)
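# Usage sketch (editor's addition): finishing the script above into a
# trainable model. The `train` / `test` DataFrames and `feature_names` come
# from preprocessing that is not shown here; the loss weights are assumed.
model = tf.keras.models.Model(inputs=inputs_list, outputs=output_layers)
model.compile(optimizer='adam',
              loss={'finish': 'binary_crossentropy',
                    'like': 'binary_crossentropy'},
              loss_weights={'finish': 0.7, 'like': 0.3})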
def DeepFM(linear_feature_columns, dnn_feature_columns,
           fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
           init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
           dnn_use_bn=False, task='binary', use_image=False, use_text=False,
           embedding_size=128):
    """Instantiates the DeepFM Network architecture, optionally with
    pretrained image/text vectors as extra FM fields.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param fm_group: list, group_name of features that will be used to do feature interactions.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :param use_image: bool, add a 128-dim pretrained image vector as an extra input
    :param use_text: bool, add a 128-dim pretrained text vector as an extra input
    :param embedding_size: int, dimension the image/text vectors are projected to
    :return: A Keras model instance.
    """
    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, init_std, seed,
        support_group=True)

    if use_image:
        # Project the raw 128-dim image vector into the embedding space and
        # append it to the default FM group as one more field.
        image_input = tf.keras.layers.Input(shape=(128,), name='image')
        image_emb = tf.keras.layers.Dense(
            embedding_size, use_bias=False,
            kernel_regularizer=l2(l2_reg_embedding))(image_input)
        image_emb = tf.keras.layers.Reshape(
            (1, embedding_size), input_shape=(embedding_size,))(image_emb)
        group_embedding_dict[DEFAULT_GROUP_NAME].append(image_emb)
        inputs_list.append(image_input)

    if use_text:
        text_input = tf.keras.layers.Input(shape=(128,), name='text')
        text_emb = tf.keras.layers.Dense(
            embedding_size, use_bias=False,
            kernel_regularizer=l2(l2_reg_embedding))(text_input)
        text_emb = tf.keras.layers.Reshape(
            (1, embedding_size), input_shape=(embedding_size,))(text_emb)
        group_embedding_dict[DEFAULT_GROUP_NAME].append(text_emb)
        inputs_list.append(text_input)

    linear_logit = get_linear_logit(features, linear_feature_columns,
                                    init_std=init_std, seed=seed,
                                    prefix='linear', l2_reg=l2_reg_linear)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1))
        for k, v in group_embedding_dict.items() if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)
    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(dnn_input)
    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
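# Usage sketch (editor's addition): with use_image/use_text enabled, the
# model gains extra inputs named 'image' and 'text', so the feed dict must
# carry a float32 matrix of shape (n_samples, 128) under each of those keys.
# Argument names are illustrative.
def _demo_deepfm_multimodal(linear_feature_columns, dnn_feature_columns,
                            base_model_input, image_vecs, text_vecs, labels):
    model = DeepFM(linear_feature_columns, dnn_feature_columns,
                   use_image=True, use_text=True)
    model_input = dict(base_model_input, image=image_vecs, text=text_vecs)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    model.fit(model_input, labels, batch_size=1024, epochs=1)
    return model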
def KDD_DIN(dnn_feature_columns, history_feature_list, dnn_use_bn=False,
            dnn_hidden_units=(200, 80), dnn_activation='relu',
            att_hidden_size=(80, 40), att_activation="dice",
            att_weight_normalization=False, l2_reg_dnn=0,
            l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,
            task='binary'):
    """Instantiates the Deep Interest Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param history_feature_list: list, indicating the sequence sparse fields
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net
    :param dnn_activation: activation function to use in the deep net
    :param att_hidden_size: list of positive integers, the layer number and units in each layer of the attention net
    :param att_activation: activation function to use in the attention net
    :param att_weight_normalization: bool. Whether to normalize the attention scores of the local activation unit.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    features = build_input_features(dnn_feature_columns)

    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat),
               dnn_feature_columns)) if dnn_feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat),
               dnn_feature_columns)) if dnn_feature_columns else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat),
               dnn_feature_columns)) if dnn_feature_columns else []

    # Split the variable-length columns into the behavior-history fields
    # (named "hist_<target field>") and the remaining sequence fields.
    history_feature_columns = []
    sparse_varlen_feature_columns = []
    history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if feature_name in history_fc_names:
            history_feature_columns.append(fc)
        else:
            sparse_varlen_feature_columns.append(fc)

    inputs_list = list(features.values())

    embedding_dict = kdd_create_embedding_matrix(dnn_feature_columns,
                                                 l2_reg_embedding, init_std,
                                                 seed, prefix="")

    query_emb_list = embedding_lookup(embedding_dict, features,
                                      sparse_feature_columns,
                                      history_feature_list,
                                      history_feature_list, to_list=True)
    keys_emb_list = embedding_lookup(embedding_dict, features,
                                     history_feature_columns,
                                     history_fc_names, history_fc_names,
                                     to_list=True)
    dnn_input_emb_list = embedding_lookup(embedding_dict, features,
                                          sparse_feature_columns,
                                          mask_feat_list=history_feature_list,
                                          to_list=True)
    dense_value_list = get_dense_input(features, dense_feature_columns)

    sequence_embed_dict = varlen_embedding_lookup(
        embedding_dict, features, sparse_varlen_feature_columns)
    sequence_embed_list = get_varlen_pooling_list(
        sequence_embed_dict, features, sparse_varlen_feature_columns,
        to_list=True)
    dnn_input_emb_list += sequence_embed_list

    keys_emb = concat_func(keys_emb_list, mask=True)
    deep_input_emb = concat_func(dnn_input_emb_list)
    query_emb = concat_func(query_emb_list, mask=True)

    # Attention-pool the behavior sequence with the candidate as the query.
    hist = AttentionSequencePoolingLayer(
        att_hidden_size, att_activation,
        weight_normalization=att_weight_normalization,
        supports_masking=True)([query_emb, keys_emb])
    deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
    deep_input_emb = Flatten()(deep_input_emb)
    dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)

    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                 dnn_use_bn, seed)(dnn_input)
    final_logit = Dense(1, use_bias=False)(output)

    output = PredictionLayer(task)(final_logit)

    model = Model(inputs=inputs_list, outputs=output)
    return model
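# Usage sketch (editor's addition): DIN expects, for every field in
# history_feature_list (e.g. 'item_id'), a matching padded sequence input
# named 'hist_item_id'. Feature construction is DeepCTR-version-specific, so
# only the call pattern is shown; the field name is illustrative.
def _demo_kdd_din_train(dnn_feature_columns, train_model_input, labels):
    model = KDD_DIN(dnn_feature_columns, history_feature_list=['item_id'])
    model.compile(optimizer='adam', loss='binary_crossentropy')
    model.fit(train_model_input, labels, batch_size=1024, epochs=1)
    return model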
def NCF(user_feature_columns, item_feature_columns, user_gmf_embedding_dim=20,
        item_gmf_embedding_dim=20, user_mlp_embedding_dim=20,
        item_mlp_embedding_dim=20, dnn_use_bn=False,
        dnn_hidden_units=(64, 16), dnn_activation='relu', l2_reg_dnn=0,
        l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024):
    """Instantiates the NCF Model architecture.

    :param user_feature_columns: A dict mapping user feature names to vocabulary sizes.
    :param item_feature_columns: A dict mapping item feature names to vocabulary sizes.
    :param user_gmf_embedding_dim: int.
    :param item_gmf_embedding_dim: int.
    :param user_mlp_embedding_dim: int.
    :param item_mlp_embedding_dim: int.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net
    :param dnn_activation: activation function to use in the deep net
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :return: A Keras model instance.
    """
    # Rescale the GMF embedding dims so the concatenated user and item GMF
    # vectors share the same total width (their least common multiple).
    user_dim = len(user_feature_columns) * user_gmf_embedding_dim
    item_dim = len(item_feature_columns) * item_gmf_embedding_dim
    dim = (user_dim * item_dim) // math.gcd(user_dim, item_dim)
    user_gmf_embedding_dim = int(dim / len(user_feature_columns))
    item_gmf_embedding_dim = int(dim / len(item_feature_columns))

    # Generalized Matrix Factorization (GMF) part
    user_gmf_feature_columns = [
        SparseFeat(feat, vocabulary_size=size,
                   embedding_dim=user_gmf_embedding_dim)
        for feat, size in user_feature_columns.items()
    ]
    user_features = build_input_features(user_gmf_feature_columns)
    user_inputs_list = list(user_features.values())
    user_gmf_sparse_embedding_list, user_gmf_dense_value_list = input_from_feature_columns(
        user_features, user_gmf_feature_columns, l2_reg_embedding, init_std,
        seed, prefix='gmf_')
    user_gmf_input = combined_dnn_input(user_gmf_sparse_embedding_list, [])
    user_gmf_out = Lambda(lambda x: x,
                          name="user_gmf_embedding")(user_gmf_input)

    item_gmf_feature_columns = [
        SparseFeat(feat, vocabulary_size=size,
                   embedding_dim=item_gmf_embedding_dim)
        for feat, size in item_feature_columns.items()
    ]
    item_features = build_input_features(item_gmf_feature_columns)
    item_inputs_list = list(item_features.values())
    item_gmf_sparse_embedding_list, item_gmf_dense_value_list = input_from_feature_columns(
        item_features, item_gmf_feature_columns, l2_reg_embedding, init_std,
        seed, prefix='gmf_')
    item_gmf_input = combined_dnn_input(item_gmf_sparse_embedding_list, [])
    item_gmf_out = Lambda(lambda x: x,
                          name="item_gmf_embedding")(item_gmf_input)

    # Element-wise product of the user and item GMF vectors.
    gmf_out = Multiply()([user_gmf_out, item_gmf_out])

    # Multi-Layer Perceptron (MLP) part
    user_mlp_feature_columns = [
        SparseFeat(feat, vocabulary_size=size,
                   embedding_dim=user_mlp_embedding_dim)
        for feat, size in user_feature_columns.items()
    ]
    user_mlp_sparse_embedding_list, user_mlp_dense_value_list = input_from_feature_columns(
        user_features, user_mlp_feature_columns, l2_reg_embedding, init_std,
        seed, prefix='mlp_')
    user_mlp_input = combined_dnn_input(user_mlp_sparse_embedding_list,
                                        user_mlp_dense_value_list)
    user_mlp_out = Lambda(lambda x: x,
                          name="user_mlp_embedding")(user_mlp_input)

    item_mlp_feature_columns = [
        SparseFeat(feat, vocabulary_size=size,
                   embedding_dim=item_mlp_embedding_dim)
        for feat, size in item_feature_columns.items()
    ]
    item_mlp_sparse_embedding_list, item_mlp_dense_value_list = input_from_feature_columns(
        item_features, item_mlp_feature_columns, l2_reg_embedding, init_std,
        seed, prefix='mlp_')
    item_mlp_input = combined_dnn_input(item_mlp_sparse_embedding_list,
                                        item_mlp_dense_value_list)
    item_mlp_out = Lambda(lambda x: x,
                          name="item_mlp_embedding")(item_mlp_input)

    mlp_input = Concatenate(axis=1)([user_mlp_out, item_mlp_out])
    mlp_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  dnn_use_bn, seed, name="mlp_embedding")(mlp_input)

    # Fusion of GMF and MLP (NeuMF head)
    neumf_input = Concatenate(axis=1)([gmf_out, mlp_out])
    neumf_out = DNN(hidden_units=[1], activation='sigmoid')(neumf_input)
    output = Lambda(lambda x: x, name='neumf_out')(neumf_out)

    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    return model
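# Usage sketch (editor's addition): NCF takes dicts of name -> vocabulary
# size rather than feature-column lists. Names and sizes are illustrative.
def _demo_ncf_train(train_model_input, labels):
    user_columns = {'user_id': 10000, 'user_city': 400}   # hypothetical
    item_columns = {'item_id': 50000}                     # hypothetical
    model = NCF(user_columns, item_columns)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    model.fit(train_model_input, labels, batch_size=1024, epochs=1)
    return model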
def MIND(user_feature_columns, item_feature_columns, num_sampled=5, k_max=2,
         p=1.0, dynamic_k=False, user_dnn_hidden_units=(64, 32),
         dnn_activation='relu', dnn_use_bn=False, l2_reg_dnn=0,
         l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024):
    """Instantiates the MIND Model architecture.

    :param user_feature_columns: An iterable containing the user features used by the model.
    :param item_feature_columns: An iterable containing the item features used by the model.
    :param num_sampled: int, the number of classes to randomly sample per batch.
    :param k_max: int, the max size of the user interest embedding
    :param p: float, the parameter for adjusting the attention distribution in LabelAwareAttention.
    :param dynamic_k: bool, whether or not to use a dynamic interest number
    :param user_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the user tower
    :param dnn_activation: activation function to use in the deep net
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param l2_reg_dnn: L2 regularizer strength applied to the DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :return: A Keras model instance.
    """
    if len(item_feature_columns) > 1:
        raise ValueError(
            "Currently, MIND only supports a single item feature such as item_id")
    item_feature_column = item_feature_columns[0]
    item_feature_name = item_feature_column.name

    history_feature_list = [item_feature_name]

    features = build_input_features(user_feature_columns)
    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat),
               user_feature_columns)) if user_feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat),
               user_feature_columns)) if user_feature_columns else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat),
               user_feature_columns)) if user_feature_columns else []

    history_feature_columns = []
    sparse_varlen_feature_columns = []
    history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if feature_name in history_fc_names:
            history_feature_columns.append(fc)
        else:
            sparse_varlen_feature_columns.append(fc)

    inputs_list = list(features.values())

    embedding_dict = create_embedding_matrix(
        user_feature_columns + item_feature_columns, l2_reg_embedding,
        init_std, seed, prefix="")

    item_features = build_input_features(item_feature_columns)

    query_emb_list = embedding_lookup(embedding_dict, item_features,
                                      item_feature_columns,
                                      history_feature_list,
                                      history_feature_list, to_list=True)
    keys_emb_list = embedding_lookup(embedding_dict, features,
                                     history_feature_columns,
                                     history_fc_names, history_fc_names,
                                     to_list=True)
    dnn_input_emb_list = embedding_lookup(embedding_dict, features,
                                          sparse_feature_columns,
                                          mask_feat_list=history_feature_list,
                                          to_list=True)
    dense_value_list = get_dense_input(features, dense_feature_columns)

    sequence_embed_dict = varlen_embedding_lookup(
        embedding_dict, features, sparse_varlen_feature_columns)
    sequence_embed_list = get_varlen_pooling_list(
        sequence_embed_dict, features, sparse_varlen_feature_columns,
        to_list=True)
    dnn_input_emb_list += sequence_embed_list

    # keys_emb = concat_func(keys_emb_list, mask=True)
    # query_emb = concat_func(query_emb_list, mask=True)

    history_emb = PoolingLayer()(NoMask()(keys_emb_list))
    target_emb = PoolingLayer()(NoMask()(query_emb_list))

    target_emb_size = target_emb.get_shape()[-1].value
    max_len = history_emb.get_shape()[1].value
    hist_len = features['hist_len']

    # Extract up to k_max interest capsules from the behavior sequence.
    high_capsule = CapsuleLayer(input_units=target_emb_size,
                                out_units=target_emb_size, max_len=max_len,
                                k_max=k_max)((history_emb, hist_len))

    if len(dnn_input_emb_list) > 0 or len(dense_value_list) > 0:
        user_other_feature = combined_dnn_input(dnn_input_emb_list,
                                                dense_value_list)
        # Tile the non-sequence user features so that each interest capsule
        # gets a copy.
        other_feature_tile = tf.keras.layers.Lambda(
            tile_user_otherfeat,
            arguments={'k_max': k_max})(user_other_feature)
        user_deep_input = Concatenate()(
            [NoMask()(other_feature_tile), high_capsule])
    else:
        user_deep_input = high_capsule

    user_embeddings = DNN(user_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                          dnn_dropout, dnn_use_bn, seed,
                          name="user_embedding")(user_deep_input)

    item_inputs_list = list(item_features.values())
    item_embedding = embedding_dict[item_feature_name]

    if dynamic_k:
        user_embedding_final = LabelAwareAttention(k_max=k_max, pow_p=p)(
            (user_embeddings, target_emb, hist_len))
    else:
        user_embedding_final = LabelAwareAttention(k_max=k_max, pow_p=p)(
            (user_embeddings, target_emb))

    output = SampledSoftmaxLayer(item_embedding, num_sampled=num_sampled)(
        inputs=(user_embedding_final, item_features[item_feature_name]))
    model = Model(inputs=inputs_list + item_inputs_list, outputs=output)

    model.__setattr__("user_input", inputs_list)
    model.__setattr__("user_embedding", user_embeddings)
    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("item_embedding",
                      get_item_embedding(item_embedding,
                                         item_features[item_feature_name]))

    return model
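# Usage sketch (editor's addition): MIND trains exactly like YoutubeDNN,
# reusing the sampled-softmax averaging loss defined above, and additionally
# expects a 'hist_<item feature>' padded sequence plus a 'hist_len' input.
def _demo_mind_train(user_feature_columns, item_feature_columns,
                     train_model_input, train_labels):
    model = MIND(user_feature_columns, item_feature_columns, k_max=2)
    model.compile(optimizer='adam', loss=_sampled_softmax_loss)
    model.fit(train_model_input, train_labels, batch_size=256, epochs=1)
    user_embedding_model = Model(inputs=model.user_input,
                                 outputs=model.user_embedding)
    return model, user_embedding_model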
def DSSM(user_feature_columns, item_feature_columns,
         user_dnn_hidden_units=(64, 32), item_dnn_hidden_units=(64, 32),
         dnn_activation='tanh', dnn_use_bn=False, l2_reg_dnn=0,
         l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,
         metric='cos'):
    """Two-tower DSSM; identical to the documented variant below except that
    it also writes the model graph to ``dnn.png``."""
    embedding_matrix_dict = create_embedding_matrix(
        user_feature_columns + item_feature_columns, l2_reg_embedding,
        init_std, seed, seq_mask_zero=True)

    user_features = build_input_features(user_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(
        user_features, user_feature_columns, l2_reg_embedding, init_std, seed,
        embedding_matrix_dict=embedding_matrix_dict)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list,
                                        user_dense_value_list)

    item_features = build_input_features(item_feature_columns)
    item_inputs_list = list(item_features.values())
    item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns(
        item_features, item_feature_columns, l2_reg_embedding, init_std, seed,
        embedding_matrix_dict=embedding_matrix_dict)
    item_dnn_input = combined_dnn_input(item_sparse_embedding_list,
                                        item_dense_value_list)

    user_dnn_out = DNN(user_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed)(user_dnn_input)
    item_dnn_out = DNN(item_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed)(item_dnn_input)

    score = Similarity(type=metric)([user_dnn_out, item_dnn_out])

    output = PredictionLayer("binary", False)(score)

    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)
    plot_model(model, to_file='dnn.png', show_shapes=True)

    model.__setattr__("user_input", user_inputs_list)
    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("user_embedding", user_dnn_out)
    model.__setattr__("item_embedding", item_dnn_out)

    return model
def DSSM(user_dnn_feature_columns, item_dnn_feature_columns, gamma=1,
         dnn_use_bn=True, dnn_hidden_units=(300, 300, 128),
         dnn_activation='tanh', l2_reg_dnn=0, l2_reg_embedding=1e-6,
         dnn_dropout=0, init_std=0.0001, seed=1024, task='binary'):
    """Instantiates the Deep Structured Semantic Model architecture.

    :param user_dnn_feature_columns: An iterable containing the user features used by the deep part of the model.
    :param item_dnn_feature_columns: An iterable containing the item features used by the deep part of the model.
    :param gamma: smoothing factor in the softmax function for DSSM
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net
    :param dnn_activation: activation function to use in the deep net
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    user_features = build_input_features(user_dnn_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(
        user_features, user_dnn_feature_columns, l2_reg_embedding, init_std,
        seed)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list,
                                        user_dense_value_list)

    item_features = build_input_features(item_dnn_feature_columns)
    item_inputs_list = list(item_features.values())
    item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns(
        item_features, item_dnn_feature_columns, l2_reg_embedding, init_std,
        seed)
    item_dnn_input = combined_dnn_input(item_sparse_embedding_list,
                                        item_dense_value_list)

    user_dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed,
                       name="user_embedding")(user_dnn_input)
    item_dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed,
                       name="item_embedding")(item_dnn_input)

    score = Cosine_Similarity(user_dnn_out, item_dnn_out, gamma=gamma)

    output = PredictionLayer(task, False)(score)
    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    return model
def xDeepFM_MTL(linear_feature_columns, dnn_feature_columns, embedding_size=8,
                dnn_hidden_units=(256, 256), cin_layer_size=(256, 256,),
                cin_split_half=True, init_std=0.0001, l2_reg_dnn=0,
                dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False,
                task_net_size=(128,), l2_reg_linear=0.00001,
                l2_reg_embedding=0.00001, seed=1024):
    """xDeepFM with a per-task tower on top of the shared deep part. Despite
    the MTL name, only the ``like`` head is currently wired up."""
    # check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')

    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features, linear_feature_columns,
                                    l2_reg=l2_reg_linear, init_std=init_std,
                                    seed=seed, prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, 0,
                       seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(1, activation=None)(exFM_out)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)

    like_out = DNN(task_net_size)(deep_out)
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])

    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output_like)
    return model
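# Usage sketch (editor's addition): the current single-head version compiles
# as a plain binary classifier.
def _demo_xdeepfm_mtl_train(linear_feature_columns, dnn_feature_columns,
                            train_model_input, like_labels):
    model = xDeepFM_MTL(linear_feature_columns, dnn_feature_columns)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    model.fit(train_model_input, like_labels, batch_size=4096, epochs=1)
    return model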
def DSSM(user_feature_columns, item_feature_columns,
         user_dnn_hidden_units=(64, 32), item_dnn_hidden_units=(64, 32),
         dnn_activation='tanh', dnn_use_bn=False, l2_reg_dnn=0,
         l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,
         metric='cos'):
    """Instantiates the Deep Structured Semantic Model architecture.

    :param user_feature_columns: An iterable containing the user features used by the model.
    :param item_feature_columns: An iterable containing the item features used by the model.
    :param user_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the user tower
    :param item_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the item tower
    :param dnn_activation: activation function to use in the deep net
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param metric: str, ``"cos"`` for cosine or ``"ip"`` for inner product
    :return: A Keras model instance.
    """
    embedding_matrix_dict = create_embedding_matrix(
        user_feature_columns + item_feature_columns, l2_reg_embedding,
        init_std, seed, seq_mask_zero=True)

    user_features = build_input_features(user_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(
        user_features, user_feature_columns, l2_reg_embedding, init_std, seed,
        embedding_matrix_dict=embedding_matrix_dict)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list,
                                        user_dense_value_list)

    item_features = build_input_features(item_feature_columns)
    item_inputs_list = list(item_features.values())
    item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns(
        item_features, item_feature_columns, l2_reg_embedding, init_std, seed,
        embedding_matrix_dict=embedding_matrix_dict)
    item_dnn_input = combined_dnn_input(item_sparse_embedding_list,
                                        item_dense_value_list)

    user_dnn_out = DNN(user_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed)(user_dnn_input)
    item_dnn_out = DNN(item_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed)(item_dnn_input)

    score = Similarity(type=metric)([user_dnn_out, item_dnn_out])

    output = PredictionLayer("binary", False)(score)

    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    model.__setattr__("user_input", user_inputs_list)
    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("user_embedding", user_dnn_out)
    model.__setattr__("item_embedding", item_dnn_out)

    return model
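# Usage sketch (editor's addition): this DSSM is trained pointwise on 0/1
# labels; afterwards the two towers can be split out for retrieval via the
# attributes attached above.
def _demo_dssm_train(user_feature_columns, item_feature_columns,
                     train_model_input, labels):
    model = DSSM(user_feature_columns, item_feature_columns, metric='cos')
    model.compile(optimizer='adam', loss='binary_crossentropy')
    model.fit(train_model_input, labels, batch_size=256, epochs=1)
    user_embedding_model = Model(inputs=model.user_input,
                                 outputs=model.user_embedding)
    item_embedding_model = Model(inputs=model.item_input,
                                 outputs=model.item_embedding)
    return model, user_embedding_model, item_embedding_model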
def DeepFMmmoe(linear_feature_columns, dnn_feature_columns, embedding_size=8,
               use_fm=True, dnn_hidden_units=(128, 128),
               l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
               init_std=0.0001, seed=1024, dnn_dropout=0,
               dnn_activation='relu', dnn_use_bn=False, task='binary',
               task_net_size=(128,)):
    """Instantiates a two-task (finish/like) MMoE network on DeepFM-style
    inputs. The linear and FM logits are disabled in this variant; only the
    shared embedding input feeds the MMoE block.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding_size
    :param use_fm: bool, unused here; the FM part is disabled in this variant
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, initialization std of the embedding vectors
    :param seed: integer, random seed
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :param task_net_size: unused here; the tower width is hardcoded to 128 in this variant
    :return: A Keras model instance.
    """
    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    # The linear and FM logit branches of DeepFM are intentionally left out
    # here; see the multi-task DeepFM variant above for the full combination.
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    mmoe_layers = MMoE(units=32, num_experts=4, num_tasks=2)(dnn_input)

    output_layers = []
    target = ['finish', 'like']

    # Build a tower per task from the MMoE outputs.
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=128, activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=1, name=target[index], activation='sigmoid',
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output_layers)
    return model
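# Usage sketch (editor's addition): with outputs named 'finish' and 'like',
# per-task losses and weights can be given as dicts. Weights illustrative.
def _demo_deepfm_mmoe2_train(linear_feature_columns, dnn_feature_columns,
                             train_model_input, finish_labels, like_labels):
    model = DeepFMmmoe(linear_feature_columns, dnn_feature_columns)
    model.compile(optimizer='adam',
                  loss={'finish': 'binary_crossentropy',
                        'like': 'binary_crossentropy'},
                  loss_weights={'finish': 0.7, 'like': 0.3})
    model.fit(train_model_input,
              {'finish': finish_labels, 'like': like_labels},
              batch_size=4096, epochs=1)
    return model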