def xDeepFM_MTL(feature_dim_dict, embedding_size=8, hidden_size=(256, 256),
                cin_layer_size=(256, 256,), cin_split_half=True,
                task_net_size=(128,), l2_reg_linear=0.00001,
                l2_reg_embedding=0.00001, seed=1024, ):
    check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')
    print("[xdeepfm] feature_dim_dict: {}".format(feature_dim_dict))

    deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(
        feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear,
        0.0001, seed)
    print("[xdeepfm] deep_emb_list:", deep_emb_list)
    print("[xdeepfm] linear_logit:", linear_logit)
    print("[xdeepfm] inputs_list:", inputs_list)

    # video_input = tf.keras.layers.Input((128,))
    # inputs_list.append(video_input)

    fm_input = concat_fun(deep_emb_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(1, activation=None, )(exFM_out)

    deep_input = tf.keras.layers.Flatten()(fm_input)
    deep_out = MLP(hidden_size)(deep_input)

    finish_out = MLP(task_net_size)(deep_out)
    finish_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(finish_out)

    like_out = MLP(task_net_size)(deep_out)
    like_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(like_out)

    finish_logit = tf.keras.layers.add(
        [linear_logit, finish_logit, exFM_logit])
    like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])

    output_finish = PredictionLayer('sigmoid', name='finish')(finish_logit)
    output_like = PredictionLayer('sigmoid', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
def myAutoInt(feature_dim_dict, embedding_size=8, att_layer_num=3,
              att_embedding_size=8, att_head_num=4, att_res=True,
              hidden_size=(256, 256), activation='relu', l2_reg_deep=0,
              l2_reg_embedding=1e-5, use_bn=False, keep_prob=1.0,
              init_std=0.0001, seed=1024, final_activation='sigmoid', ):
    if len(hidden_size) <= 0 and att_layer_num <= 0:
        raise ValueError("Either hidden_size or att_layer_num must be > 0")
    check_feature_config_dict(feature_dim_dict)

    deep_emb_list, _, inputs_list = preprocess_input_embedding(
        feature_dim_dict, embedding_size, l2_reg_embedding, 0, init_std,
        seed, False)

    att_input = concat_fun(deep_emb_list, axis=1)
    for _ in range(att_layer_num):
        att_input = InteractingLayer(
            att_embedding_size, att_head_num, att_res)(att_input)
    att_output = tf.keras.layers.Flatten()(att_input)

    deep_input = tf.keras.layers.Flatten()(concat_fun(deep_emb_list))
    deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
                   use_bn, seed)(deep_input)

    finish_out = tf.keras.layers.Concatenate()([att_output, deep_out])
    finish_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(finish_out)

    like_out = tf.keras.layers.Concatenate()([att_output, deep_out])
    like_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(like_out)

    output_finish = PredictionLayer(final_activation, name='finish')(finish_logit)
    output_like = PredictionLayer(final_activation, name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
def DeepFM2(linear_feature_columns, dnn_feature_columns, fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', use_attention=True, attention_factor=8, l2_reg_att=1e-5, afm_dropout=0): """Instantiates the DeepFM Network architecture. :param afm_dropout: :param l2_reg_att: :param attention_factor: :param use_attention: :param linear_feature_columns: An iterable containing all the features used by linear part of the model. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param fm_group: list, group_name of features that will be used to do feature interactions. :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN :param l2_reg_linear: float. L2 regularizer strength applied to linear part :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param seed: integer ,to use as random seed. :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_activation: Activation function to use in DNN :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ features = build_input_features( linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear) group_embedding_dict, dense_value_list = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, seed, support_group=True) if use_attention: fm_logit = add_func([AFMLayer(attention_factor, l2_reg_att, afm_dropout, seed)(list(v)) for k, v in group_embedding_dict.items() if k in fm_group]) else: fm_logit = add_func([FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items() if k in fm_group]) # fm_logit = add_func([FM()(concat_func(v, axis=1)) # for k, v in group_embedding_dict.items() if k in fm_group]) dnn_input = combined_dnn_input(list(chain.from_iterable( group_embedding_dict.values())), dense_value_list) dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input) dnn_logit = tf.keras.layers.Dense( 1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed=seed))(dnn_output) final_logit = add_func([linear_logit, fm_logit, dnn_logit]) output = PredictionLayer(task)(final_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=output) return model
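# A minimal usage sketch for DeepFM2, not part of the original code: it assumes
# DeepCTR-style SparseFeat / DenseFeat constructors are importable, and the
# feature names, vocabulary sizes and toy data below are made up for illustration.
def _deepfm2_usage_example():
    import numpy as np
    from deepctr.feature_column import SparseFeat, DenseFeat

    sparse_cols = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
                   SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8)]
    dense_cols = [DenseFeat('price', 1)]

    # The same columns feed both the linear part and the deep part here.
    model = DeepFM2(sparse_cols + dense_cols, sparse_cols + dense_cols,
                    use_attention=True, attention_factor=8)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])

    data = {'user_id': np.random.randint(0, 1000, 256),
            'item_id': np.random.randint(0, 5000, 256),
            'price': np.random.random(256).astype('float32')}
    labels = np.random.randint(0, 2, 256)
    model.fit(data, labels, batch_size=64, epochs=1)
    return model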
def WideDeep():
    # embedding_size=8
    hidden_size = (128, 128)
    l2_reg_linear = 1e-5
    l2_reg_embedding = 1e-5
    l2_reg_deep = 0
    init_std = 0.0001
    seed = 1024
    keep_prob = 1
    activation = 'relu'
    final_activation = 'relu'

    # Read only the first row(s) of each file to infer the wide input width
    # and the deep column names.
    wide_features = pd.read_csv('data/path_matrix.txt', sep=' ',
                                header=None, nrows=1)
    deep_features = pd.read_csv('data/sns_dense.csv', sep=',',
                                header=0, nrows=2)

    wide_input = Input(shape=(wide_features.shape[1], ),
                       name='wide_' + str(wide_features.shape[1]))
    wide_term = Dense(1, use_bias=False, activation=None)(wide_input)

    deep_input = deep_features.iloc[:, :-1]
    deep_feats = {
        feat: Input(shape=(1, ), name=feat + '_' + str(i))
        for i, feat in enumerate(deep_input)
    }
    deep_list = [v for v in deep_feats.values()]
    deep_input = Concatenate()(deep_list)
    deep_input = Flatten()(deep_input)

    # hidden_size, activation='relu', l2_reg=0, keep_prob=1, use_bn=False, seed=1024
    deep_out = MLP(hidden_size=hidden_size, activation=activation,
                   l2_reg=l2_reg_deep, keep_prob=keep_prob, use_bn=False,
                   seed=seed)(deep_input)
    deep_logit = Dense(1, use_bias=False, activation=None)(deep_out)

    final_logit = add([deep_logit, wide_term])
    output = PredictionLayer(final_activation)(final_logit)

    deep_list.append(wide_input)
    model = Model(inputs=deep_list, outputs=output)
    model.summary()
    keras.utils.plot_model(model, to_file='image/widedeep_model.png')
    return model
def FM(user_feature_columns, item_feature_columns, l2_reg_embedding=1e-6, init_std=0.0001, seed=1024, metric='cos'): """Instantiates the FM architecture. :param user_feature_columns: An iterable containing user's features used by the model. :param item_feature_columns: An iterable containing item's features used by the model. :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param metric: str, ``"cos"`` for cosine or ``"ip"`` for inner product :return: A Keras model instance. """ embedding_matrix_dict = create_embedding_matrix(user_feature_columns + item_feature_columns, l2_reg_embedding, init_std, seed, seq_mask_zero=True) user_features = build_input_features(user_feature_columns) user_inputs_list = list(user_features.values()) user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(user_features, user_feature_columns, l2_reg_embedding, init_std, seed, support_dense=False, embedding_matrix_dict=embedding_matrix_dict) item_features = build_input_features(item_feature_columns) item_inputs_list = list(item_features.values()) item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns(item_features, item_feature_columns, l2_reg_embedding, init_std, seed, support_dense=False, embedding_matrix_dict=embedding_matrix_dict) user_dnn_input = concat_func(user_sparse_embedding_list, axis=1) user_vector_sum = Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=False))(user_dnn_input) item_dnn_input = concat_func(item_sparse_embedding_list, axis=1) item_vector_sum = Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=False))(item_dnn_input) score = Similarity(type=metric)([user_vector_sum, item_vector_sum]) output = PredictionLayer("binary", False)(score) model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output) model.__setattr__("user_input", user_inputs_list) model.__setattr__("user_embedding", user_vector_sum) model.__setattr__("item_input", item_inputs_list) model.__setattr__("item_embedding", item_vector_sum) return model
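# A small sketch, not in the original source, of how the attributes attached to
# the FM matching model above are typically used for retrieval: separate user and
# item embedding models are built from them so embeddings can be exported and
# matched with a nearest-neighbour index. Feature columns and the prediction
# data dicts are assumed to be prepared elsewhere.
def _fm_retrieval_example(model, user_data, item_data):
    import tensorflow as tf

    # Sub-models that share weights with the trained matching model.
    user_embedding_model = tf.keras.models.Model(
        inputs=model.user_input, outputs=model.user_embedding)
    item_embedding_model = tf.keras.models.Model(
        inputs=model.item_input, outputs=model.item_embedding)

    user_embs = user_embedding_model.predict(user_data, batch_size=2 ** 12)
    item_embs = item_embedding_model.predict(item_data, batch_size=2 ** 12)
    return user_embs, item_embs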
def MMOE(dnn_feature_columns, num_tasks, tasks, num_experts=4, expert_dim=8,
         dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
         task_dnn_units=None, seed=1024, dnn_dropout=0, dnn_activation='relu'):
    """Instantiates the Multi-gate Mixture-of-Experts architecture.

    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param num_tasks: integer, number of tasks, equal to the number of outputs, must be greater than 1.
    :param tasks: list of str, indicating the loss of each task, ``"binary"`` for binary logloss, ``"regression"`` for regression loss. e.g. ['binary', 'regression']
    :param num_experts: integer, number of experts.
    :param expert_dim: integer, the hidden units of each expert.
    :param dnn_hidden_units: list, list of positive integers or empty list, the layer number and units in each layer of the shared-bottom DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector.
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN.
    :param task_dnn_units: list, list of positive integers or empty list, the layer number and units in each layer of the task-specific DNN.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN.
    :return: A Keras model instance.
    """
    if num_tasks <= 1:
        raise ValueError("num_tasks must be greater than 1")
    if len(tasks) != num_tasks:
        raise ValueError("num_tasks must be equal to the length of tasks")
    for task in tasks:
        if task not in ['binary', 'regression']:
            raise ValueError(
                "task must be binary or regression, {} is illegal".format(task))

    features = build_input_features(dnn_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed)
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  False, seed=seed)(dnn_input)
    mmoe_outs = MMOELayer(num_tasks, num_experts, expert_dim)(dnn_out)
    if task_dnn_units is not None:
        mmoe_outs = [DNN(task_dnn_units, dnn_activation, l2_reg_dnn,
                         dnn_dropout, False, seed)(mmoe_out)
                     for mmoe_out in mmoe_outs]

    task_outputs = []
    for mmoe_out, task in zip(mmoe_outs, tasks):
        logit = tf.keras.layers.Dense(
            1, use_bias=False, activation=None)(mmoe_out)
        output = PredictionLayer(task)(logit)
        task_outputs.append(output)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=task_outputs)
    return model
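# A minimal usage sketch for MMOE, not part of the original code: it assumes
# DeepCTR-style SparseFeat columns; the feature names, sizes and toy labels are
# illustrative only. Each task output gets its own loss when compiling.
def _mmoe_usage_example():
    import numpy as np
    from deepctr.feature_column import SparseFeat

    columns = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
               SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8)]

    model = MMOE(columns, num_tasks=2, tasks=['binary', 'binary'],
                 num_experts=4, expert_dim=8)
    model.compile(optimizer='adam',
                  loss=['binary_crossentropy', 'binary_crossentropy'],
                  metrics=['AUC'])

    data = {'user_id': np.random.randint(0, 1000, 256),
            'item_id': np.random.randint(0, 5000, 256)}
    finish = np.random.randint(0, 2, 256)
    like = np.random.randint(0, 2, 256)
    model.fit(data, [finish, like], batch_size=64, epochs=1)
    return model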
def LR(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(128, 128),
       l2_reg_linear=1e-5, l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001,
       seed=1024, dnn_dropout=0, dnn_activation='relu', task='binary'):
    """Instantiates a Logistic Regression (linear-only) model.

    Only the linear part is built; the DNN-related arguments are kept for
    interface compatibility and have no effect here.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model (only used to build the input layers here).
    :param dnn_hidden_units: list, list of positive integers or empty list, the layer number and units in each layer of DNN (unused).
    :param l2_reg_linear: float. L2 regularizer strength applied to the wide part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector (unused).
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN (unused).
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate (unused).
    :param dnn_activation: Activation function to use in DNN (unused).
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """
    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features, linear_feature_columns,
                                    init_std=init_std, seed=seed,
                                    prefix='linear', l2_reg=l2_reg_linear)

    output = PredictionLayer(task)(linear_logit)
    model = Model(inputs=inputs_list, outputs=output)
    return model
def MT_xDeepFM(linear_feature_columns, dnn_feature_columns, embedding_size=8,
               dnn_hidden_units=(256, 256), cin_layer_size=(128, 128,),
               cin_split_half=True, cin_activation='relu',
               l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
               l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0,
               dnn_activation='relu', dnn_use_bn=False, task='binary'):
    """Instantiates the xDeepFM architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding_size.
    :param dnn_hidden_units: list, list of positive integers or empty list, the layer number and units in each layer of the deep net.
    :param cin_layer_size: list, list of positive integers or empty list, the feature maps in each hidden layer of the Compressed Interaction Network.
    :param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit.
    :param cin_activation: activation function used on feature maps.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: L2 regularizer strength applied to embedding vector.
    :param l2_reg_dnn: L2 regularizer strength applied to the deep net.
    :param l2_reg_cin: L2 regularizer strength applied to CIN.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN.
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """
    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features, linear_feature_columns,
                                    l2_reg=l2_reg_linear, init_std=init_std,
                                    seed=seed, prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half,
                       l2_reg_cin, seed)(fm_input)
        # 4 logits: (finish, like) for each of the two region groups.
        exFM_logit = tf.keras.layers.Dense(4, activation=None, )(exFM_out)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)
    deep_logit = tf.keras.layers.Dense(4, use_bias=False,
                                       activation=None)(deep_out)

    if len(dnn_hidden_units) == 0 and len(cin_layer_size) == 0:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) == 0 and len(cin_layer_size) > 0:  # linear + CIN
        final_logit = tf.keras.layers.add([linear_logit, exFM_logit])
    elif len(dnn_hidden_units) > 0 and len(cin_layer_size) == 0:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, deep_logit])
    elif len(dnn_hidden_units) > 0 and len(cin_layer_size) > 0:  # linear + CIN + Deep
        final_logit = tf.keras.layers.add(
            [linear_logit, deep_logit, exFM_logit])
    else:
        raise NotImplementedError

    output_units = PredictionLayer(task)(final_logit)

    # output = None
    # for i in range(len(flag_columns)):
    #     print(i)
    #     selected_index = [0, 1] if flag_columns[i] else [2, 3]
    #     if output != None:
    #         output = tf.concat([output, tf.reshape(tf.gather(output_units[i, :], selected_index), (1, -1))], axis=0)
    #     else:
    #         output = tf.reshape(tf.gather(output_units[i, :], selected_index), (1, -1))

    # u_region_id acts as a 0/1 gate that selects which pair of prediction heads
    # each sample uses; the like gate mirrors the finish gate.
    finish = tf.cast(1 - features['u_region_id'], dtype=tf.float32) * output_units[:, 0] + \
        tf.cast(features['u_region_id'], dtype=tf.float32) * output_units[:, 1]
    like = tf.cast(1 - features['u_region_id'], dtype=tf.float32) * output_units[:, 2] + \
        tf.cast(features['u_region_id'], dtype=tf.float32) * output_units[:, 3]

    # mask = tf.cond(pred=tf.equal(features['u_region_id'], tf.constant(value=1, dtype=tf.int32)),
    #                true_fn=lambda: [True, True, False, False],
    #                false_fn=lambda: [False, False, True, True])
    # output = tf.reshape(tf.boolean_mask(output_units, mask), shape=[-1, 2])
    # finish = output[:, 0]
    # like = output[:, 1]
    # print(output)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=[finish, like])
    return model
def MTL_with_Title( feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=( 256, 256, ), cin_split_half=True, task_net_size=(128, ), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, seed=1024, ): check_feature_config_dict(feature_dim_dict) if len(task_net_size) < 1: raise ValueError('task_net_size must be at least one layer') # xDeepFM Model deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding( feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear, 0.0001, seed) fm_input = concat_fun(deep_emb_list, axis=1) if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, seed)(fm_input) exFM_logit = tf.keras.layers.Dense( 1, activation=None, )(exFM_out) deep_input = tf.keras.layers.Flatten()(fm_input) deep_out = MLP(hidden_size)(deep_input) finish_out = MLP(task_net_size)(deep_out) finish_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out) like_out = MLP(task_net_size)(deep_out) like_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out) # Add Title Features title_input = Input(shape=(35, ), dtype='int32', name='title_input') title_embedding = Embedding(output_dim=32, input_dim=134545, input_length=35)(title_input) lstm_out = LSTM(units=32, return_sequences=True)(title_embedding) avg_out = GlobalAveragePooling1D()(lstm_out) dense1 = Dense(32, activation='relu')(avg_out) dense2 = Dense(1, activation='relu')(dense1) # finish_logit = tf.keras.layers.add( [linear_logit, finish_logit, exFM_logit, dense2]) like_logit = tf.keras.layers.add( [linear_logit, like_logit, exFM_logit, dense2]) output_finish = PredictionLayer('sigmoid', name='finish')(finish_logit) output_like = PredictionLayer('sigmoid', name='like')(like_logit) print(str(inputs_list)) inputs_list.append(title_input) model = tf.keras.models.Model(inputs=inputs_list, outputs=[output_finish, output_like]) return model
def DeepFM(feature_dim_dict, attention_feature_name=None, embedding_size=8, use_fm=True, dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'): """Instantiates the DeepFM Network architecture. :param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']} :param embedding_size: positive integer,sparse feature embedding_size :param use_fm: bool,use FM part or not :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN :param l2_reg_linear: float. L2 regularizer strength applied to linear part :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_activation: Activation function to use in DNN :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ check_feature_config_dict(feature_dim_dict) deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = \ preprocess_input_embedding(feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear, init_std, seed, create_linear_weight=True, use_var_attention=( True if attention_feature_name else False), attention_feature_name=attention_feature_name) linear_logit = get_linear_logit(linear_emb_list, dense_input_dict, l2_reg_linear) fm_input = concat_fun(deep_emb_list, axis=1) deep_input = tf.keras.layers.Flatten()(fm_input) fm_out = FM()(fm_input) deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(deep_input) deep_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(deep_out) if len(dnn_hidden_units) == 0 and use_fm == False: # only linear final_logit = linear_logit elif len(dnn_hidden_units) == 0 and use_fm == True: # linear + FM final_logit = tf.keras.layers.add([linear_logit, fm_out]) elif len(dnn_hidden_units) > 0 and use_fm == False: # linear + Deep final_logit = tf.keras.layers.add([linear_logit, deep_logit]) elif len(dnn_hidden_units) > 0 and use_fm == True: # linear + FM + Deep final_logit = tf.keras.layers.add([linear_logit, fm_out, deep_logit]) else: raise NotImplementedError output = PredictionLayer(task)(final_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=output) return model
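# An illustrative call sketch for this DeepFM variant, not part of the original
# code: the feature config follows the dict format shown in the docstring above.
# The exact feature-config objects differ between library versions, so treat
# this only as a shape-of-the-API example.
def _deepfm_usage_example():
    feature_dim_dict = {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2},
                        'dense': ['field_4', 'field_5']}
    model = DeepFM(feature_dim_dict, embedding_size=8, use_fm=True,
                   dnn_hidden_units=(128, 128), task='binary')
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model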
def KDD_DIN(dnn_feature_columns, history_feature_list, dnn_use_bn=False, dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_size=(80, 40), att_activation="dice", att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary'): """Instantiates the Deep Interest Network architecture. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param history_feature_list: list,to indicate sequence sparse field :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param dnn_activation: Activation function to use in deep net :param att_hidden_size: list,list of positive integer , the layer number and units in each layer of attention net :param att_activation: Activation function to use in attention net :param att_weight_normalization: bool.Whether normalize the attention score of local activation unit. :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ features = build_input_features(dnn_feature_columns) sparse_feature_columns = list( filter(lambda x: isinstance(x, SparseFeat), dnn_feature_columns)) if dnn_feature_columns else [] dense_feature_columns = list( filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else [] varlen_sparse_feature_columns = list( filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else [] history_feature_columns = [] sparse_varlen_feature_columns = [] history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list)) for fc in varlen_sparse_feature_columns: feature_name = fc.name if feature_name in history_fc_names: history_feature_columns.append(fc) else: sparse_varlen_feature_columns.append(fc) inputs_list = list(features.values()) embedding_dict = kdd_create_embedding_matrix(dnn_feature_columns, l2_reg_embedding, init_std, seed, prefix="") query_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, history_feature_list, history_feature_list, to_list=True) keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns, history_fc_names, history_fc_names, to_list=True) dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, mask_feat_list=history_feature_list, to_list=True) dense_value_list = get_dense_input(features, dense_feature_columns) sequence_embed_dict = varlen_embedding_lookup( embedding_dict, features, sparse_varlen_feature_columns) sequence_embed_list = get_varlen_pooling_list( sequence_embed_dict, features, sparse_varlen_feature_columns, to_list=True) dnn_input_emb_list += sequence_embed_list keys_emb = concat_func(keys_emb_list, mask=True) deep_input_emb = concat_func(dnn_input_emb_list) query_emb = concat_func(query_emb_list, mask=True) hist = AttentionSequencePoolingLayer( att_hidden_size, att_activation, weight_normalization=att_weight_normalization, supports_masking=True)([query_emb, keys_emb]) 
deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist]) deep_input_emb = Flatten()(deep_input_emb) dnn_input = combined_dnn_input([deep_input_emb], dense_value_list) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input) final_logit = Dense(1, use_bias=False)(output) output = PredictionLayer(task)(final_logit) model = Model(inputs=inputs_list, outputs=output) return model
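# A minimal column-setup sketch for KDD_DIN, not part of the original code: it
# assumes DeepCTR-style SparseFeat / VarLenSparseFeat constructors. Behavior
# sequence columns must carry the 'hist_' prefix that the function above expects
# (history_fc_names = 'hist_' + feature name); names and sizes are illustrative.
def _kdd_din_columns_example():
    from deepctr.feature_column import SparseFeat, VarLenSparseFeat

    item_vocab, max_hist_len = 5000, 50
    feature_columns = [
        SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
        SparseFeat('item_id', vocabulary_size=item_vocab, embedding_dim=8),
        VarLenSparseFeat(SparseFeat('hist_item_id', vocabulary_size=item_vocab,
                                    embedding_dim=8, embedding_name='item_id'),
                         maxlen=max_hist_len),
    ]
    # 'item_id' is the query feature whose behavior history is attended over.
    model = KDD_DIN(feature_columns, history_feature_list=['item_id'])
    return model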
def DSSM(user_feature_columns, item_feature_columns, user_dnn_hidden_units=(64, 32), item_dnn_hidden_units=(64, 32), dnn_activation='tanh', dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, metric='cos'): embedding_matrix_dict = create_embedding_matrix(user_feature_columns + item_feature_columns, l2_reg_embedding, init_std, seed, seq_mask_zero=True) user_features = build_input_features(user_feature_columns) user_inputs_list = list(user_features.values()) user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns( user_features, user_feature_columns, l2_reg_embedding, init_std, seed, embedding_matrix_dict=embedding_matrix_dict) user_dnn_input = combined_dnn_input(user_sparse_embedding_list, user_dense_value_list) item_features = build_input_features(item_feature_columns) item_inputs_list = list(item_features.values()) item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns( item_features, item_feature_columns, l2_reg_embedding, init_std, seed, embedding_matrix_dict=embedding_matrix_dict) item_dnn_input = combined_dnn_input(item_sparse_embedding_list, item_dense_value_list) user_dnn_out = DNN( user_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed, )(user_dnn_input) item_dnn_out = DNN(item_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(item_dnn_input) score = Similarity(type=metric)([user_dnn_out, item_dnn_out]) output = PredictionLayer("binary", False)(score) model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output) plot_model(model, to_file='dnn.png', show_shapes=True) print("go") model.__setattr__("user_input", user_inputs_list) model.__setattr__("item_input", item_inputs_list) model.__setattr__("user_embedding", user_dnn_out) model.__setattr__("item_embedding", item_dnn_out) return model
def xDeepFM_MTL( linear_feature_columns, dnn_feature_columns, gate_feature_columns, embedding_size=8, dnn_hidden_units=(256, 256), cin_layer_size=( 256, 256, ), cin_split_half=True, init_std=0.0001, l2_reg_dnn=0, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task_net_size=(128, ), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, seed=1024, ): # check_feature_config_dict(feature_dim_dict) if len(task_net_size) < 1: raise ValueError('task_net_size must be at least one layer') features = build_input_features(linear_feature_columns + dnn_feature_columns + gate_feature_columns) inputs_list = list(features.values()) sparse_embedding_list, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, embedding_size, l2_reg_embedding, init_std, seed) gate = get_dense_input(features, gate_feature_columns)[0] linear_logit = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std, seed=seed, prefix='linear') fm_input = concat_fun(sparse_embedding_list, axis=1) if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, 0, seed)(fm_input) exFM_logit = tf.keras.layers.Dense( 1, activation=None, )(exFM_out) # dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list) dnn_input = tf.keras.layers.Flatten()(fm_input) deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input) finish_out1 = DNN(task_net_size)(deep_out) finish_logit1 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out1) like_out1 = DNN(task_net_size)(deep_out) like_logit1 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out1) finish_out2 = DNN(task_net_size)(deep_out) finish_logit2 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out2) like_out2 = DNN(task_net_size)(deep_out) like_logit2 = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out2) # condition = tf.placeholder("float32", shape=[None, 1], name="condition") finish_logit = gate * finish_logit1 + (1.0 - gate) * finish_logit2 like_logit = gate * like_logit1 + (1.0 - gate) * like_logit2 print(np.shape(like_logit)) finish_logit = tf.keras.layers.add( [linear_logit, finish_logit, exFM_logit]) like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit]) output_finish = PredictionLayer('binary', name='finish')(finish_logit) output_like = PredictionLayer('binary', name='like')(like_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=[output_finish, output_like]) return model
def CapsuleNet(feature_dim_dict, seq_feature_list, embedding_size=8, hist_len_max=50, use_bn=False, dnn_hidden_units=(200, 80), dnn_activation='sigmoid', num_capsule=8, dim_capsule=2, routing_iterations=3, att_hidden_size=(64, 16), att_activation="dice", att_weight_normalization=True, att_embedding_size=1, att_head_num=8, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, alpha=1e-6, seed=1024, task='binary'): check_feature_config_dict(feature_dim_dict) sparse_input, dense_input, user_behavior_input, user_behavior_length = get_input( feature_dim_dict, seq_feature_list, hist_len_max) sparse_embedding_dict = { feat.name: Embedding(feat.dimension, embedding_size, embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed), embeddings_regularizer=l2(l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name, mask_zero=(feat.name in seq_feature_list)) for i, feat in enumerate(feature_dim_dict["sparse"]) } query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], return_feat_list=seq_feature_list) keys_emb_list = get_embedding_vec_list(sparse_embedding_dict, user_behavior_input, feature_dim_dict['sparse'], return_feat_list=seq_feature_list) deep_input_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict['sparse']) query_emb = concat_fun(query_emb_list) keys_emb = concat_fun(keys_emb_list) scores = AttentionSequencePoolingLayer( att_hidden_units=att_hidden_size, att_activation=att_activation, weight_normalization=att_weight_normalization, return_score=True)([query_emb, keys_emb, user_behavior_length]) Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=True, use_positional_encoding=True, seed=seed, supports_masking=False, blinding=True) keys_emb = Self_Attention( [keys_emb, keys_emb, user_behavior_length, user_behavior_length]) cap = Capsule(num_capsule=num_capsule, dim_capsule=dim_capsule, routings=routing_iterations, share_weights=True, supports_masking=True) hist_cap = cap(keys_emb, scores=scores) disp_loss = get_disp_loss(hist_cap) hist_cap = Reshape([1, num_capsule * dim_capsule])(NoMask()(hist_cap)) deep_input_emb = concat_fun(deep_input_emb_list) deep_input_emb = Concatenate()([deep_input_emb, hist_cap]) deep_input_emb = tf.keras.layers.Flatten()(NoMask()(deep_input_emb)) if len(dense_input) > 0: deep_input_emb = Concatenate()([deep_input_emb] + list(dense_input.values())) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, use_bn, seed)(deep_input_emb) final_logit = Dense(1, use_bias=False)(output) output = PredictionLayer(task)(final_logit) model_input_list = get_inputs_list( [sparse_input, dense_input, user_behavior_input]) model_input_list += [user_behavior_length] model = tf.keras.models.Model(inputs=model_input_list, outputs=output) model.add_loss(alpha * disp_loss) tf.keras.backend.get_session().run(tf.global_variables_initializer()) return model
def M(emb1, emb1_label, emb2, emb2_label, emb3, emb3_label, emb4, emb4_label, emb5, emb5_label, linear_feature_columns, dnn_feature_columns, fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'): #!################################################################################################################ feed_forward_size_trans_1 = 2048 max_seq_len_trans_1 = 40 model_dim_trans_1 = 128 input_trans_1 = Input(shape=(max_seq_len_trans_1, ), name='input_trans_1_layer') input_trans_1_label = Input(shape=(max_seq_len_trans_1, ), name='input_trans_1_label_layer') x = Embedding(input_dim=5307 + 1, output_dim=128, weights=[emb1], trainable=False, input_length=40, mask_zero=True)(input_trans_1) x_label = Embedding(input_dim=2 + 1, output_dim=128, weights=[emb1_label], trainable=False, input_length=40, mask_zero=True)(input_trans_1_label) encodings = PositionEncoding(model_dim_trans_1)(x) encodings = Add()([x, encodings]) encodings = Add()([x_label, encodings]) # encodings = x masks = tf.equal(input_trans_1, 0) # (bs, 100, 128*2) attention_out = MultiHeadAttention( 4, 32)([encodings, encodings, encodings, masks]) # Add & Norm attention_out += encodings attention_out = LayerNormalization()(attention_out) # Feed-Forward ff = PositionWiseFeedForward(model_dim_trans_1, feed_forward_size_trans_1) ff_out = ff(attention_out) # Add & Norm ff_out += attention_out encodings = LayerNormalization()(ff_out) encodings = GlobalMaxPooling1D()(encodings) encodings = Dropout(0.2)(encodings) output_trans_1 = Dense(5, activation='softmax', name='output_trans_1_layer')(encodings) #!################################################################################################################ feed_forward_size_trans_2 = 2048 max_seq_len_trans_2 = 40 model_dim_trans_2 = 128 input_trans_2 = Input(shape=(max_seq_len_trans_2, ), name='input_trans_2_layer') input_trans_2_label = Input(shape=(max_seq_len_trans_2, ), name='input_trans_2_label_layer') x = Embedding(input_dim=101 + 1, output_dim=128, weights=[emb2], trainable=False, input_length=40, mask_zero=True)(input_trans_2) x_label = Embedding(input_dim=2 + 1, output_dim=128, weights=[emb2_label], trainable=False, input_length=40, mask_zero=True)(input_trans_2_label) encodings = PositionEncoding(model_dim_trans_2)(x) encodings = Add()([x, encodings]) encodings = Add()([x_label, encodings]) # encodings = x masks = tf.equal(input_trans_2, 0) # (bs, 100, 128*2) attention_out = MultiHeadAttention( 4, 32)([encodings, encodings, encodings, masks]) # Add & Norm attention_out += encodings attention_out = LayerNormalization()(attention_out) # Feed-Forward ff = PositionWiseFeedForward(model_dim_trans_2, feed_forward_size_trans_2) ff_out = ff(attention_out) # Add & Norm ff_out += attention_out encodings = LayerNormalization()(ff_out) encodings = GlobalMaxPooling1D()(encodings) encodings = Dropout(0.2)(encodings) output_trans_2 = Dense(5, activation='softmax', name='output_trans_2_layer')(encodings) #!################################################################################################################ feed_forward_size_trans_3 = 2048 max_seq_len_trans_3 = 40 model_dim_trans_3 = 128 input_trans_3 = Input(shape=(max_seq_len_trans_3, ), name='input_trans_3_layer') input_trans_3_label = Input(shape=(max_seq_len_trans_3, ), name='input_trans_3_label_layer') x = Embedding(input_dim=8 + 1, output_dim=128, weights=[emb3], 
trainable=False, input_length=40, mask_zero=True)(input_trans_3) x_label = Embedding(input_dim=2 + 1, output_dim=128, weights=[emb3_label], trainable=False, input_length=40, mask_zero=True)(input_trans_3_label) encodings = PositionEncoding(model_dim_trans_3)(x) encodings = Add()([x, encodings]) encodings = Add()([x_label, encodings]) # encodings = x masks = tf.equal(input_trans_3, 0) # (bs, 100, 128*2) attention_out = MultiHeadAttention( 4, 32)([encodings, encodings, encodings, masks]) # Add & Norm attention_out += encodings attention_out = LayerNormalization()(attention_out) # Feed-Forward ff = PositionWiseFeedForward(model_dim_trans_3, feed_forward_size_trans_3) ff_out = ff(attention_out) # Add & Norm ff_out += attention_out encodings = LayerNormalization()(ff_out) encodings = GlobalMaxPooling1D()(encodings) encodings = Dropout(0.2)(encodings) output_trans_3 = Dense(5, activation='softmax', name='output_trans_3_layer')(encodings) #!################################################################################################################ feed_forward_size_trans_4 = 2048 max_seq_len_trans_4 = 40 model_dim_trans_4 = 128 input_trans_4 = Input(shape=(max_seq_len_trans_4, ), name='input_trans_4_layer') input_trans_4_label = Input(shape=(max_seq_len_trans_4, ), name='input_trans_4_label_layer') x = Embedding(input_dim=38 + 1, output_dim=128, weights=[emb4], trainable=False, input_length=40, mask_zero=True)(input_trans_4) x_label = Embedding(input_dim=2 + 1, output_dim=128, weights=[emb4_label], trainable=False, input_length=40, mask_zero=True)(input_trans_4_label) encodings = PositionEncoding(model_dim_trans_4)(x) encodings = Add()([x, encodings]) encodings = Add()([x_label, encodings]) # encodings = x masks = tf.equal(input_trans_4, 0) # (bs, 100, 128*2) attention_out = MultiHeadAttention( 4, 32)([encodings, encodings, encodings, masks]) # Add & Norm attention_out += encodings attention_out = LayerNormalization()(attention_out) # Feed-Forward ff = PositionWiseFeedForward(model_dim_trans_4, feed_forward_size_trans_4) ff_out = ff(attention_out) # Add & Norm ff_out += attention_out encodings = LayerNormalization()(ff_out) encodings = GlobalMaxPooling1D()(encodings) encodings = Dropout(0.2)(encodings) output_trans_4 = Dense(5, activation='softmax', name='output_trans_4_layer')(encodings) #!################################################################################################################ feed_forward_size_trans_5 = 2048 max_seq_len_trans_5 = 40 model_dim_trans_5 = 128 input_trans_5 = Input(shape=(max_seq_len_trans_5, ), name='input_trans_5_layer') input_trans_5_label = Input(shape=(max_seq_len_trans_5, ), name='input_trans_5_label_layer') x = Embedding(input_dim=4317 + 1, output_dim=128, weights=[emb5], trainable=False, input_length=40, mask_zero=True)(input_trans_5) x_label = Embedding(input_dim=2 + 1, output_dim=128, weights=[emb5_label], trainable=False, input_length=40, mask_zero=True)(input_trans_5_label) encodings = PositionEncoding(model_dim_trans_5)(x) encodings = Add()([x, encodings]) encodings = Add()([x_label, encodings]) # encodings = x masks = tf.equal(input_trans_5, 0) # (bs, 100, 128*2) attention_out = MultiHeadAttention( 4, 32)([encodings, encodings, encodings, masks]) # Add & Norm attention_out += encodings attention_out = LayerNormalization()(attention_out) # Feed-Forward ff = PositionWiseFeedForward(model_dim_trans_5, feed_forward_size_trans_5) ff_out = ff(attention_out) # Add & Norm ff_out += attention_out encodings = LayerNormalization()(ff_out) encodings = 
GlobalMaxPooling1D()(encodings) encodings = Dropout(0.2)(encodings) output_trans_5 = Dense(5, activation='softmax', name='output_trans_5_layer')(encodings) #!################################################################################################################ trans_output = concatenate([output_trans_1, output_trans_2], axis=-1) trans_output = concatenate([trans_output, output_trans_3], axis=-1) trans_output = concatenate([trans_output, output_trans_4], axis=-1) trans_output = concatenate([trans_output, output_trans_5], axis=-1) # trans_output = Dense(2, activation='softmax', name='output_trans')(trans_output) #!################################################################################################################ #!mix2 features = build_input_features(linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear) group_embedding_dict, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, l2_reg_embedding, seed, support_group=True) fm_logit = add_func([ FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items() if k in fm_group ]) dnn_input = combined_dnn_input( list(chain.from_iterable(group_embedding_dict.values())), dense_value_list) mix = concatenate([trans_output, dnn_input], axis=-1) #!#mix dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(mix) dnn_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(dnn_output) final_logit = add_func([linear_logit, fm_logit, dnn_logit]) output = PredictionLayer(task)(final_logit) #!################################################################################################################ model = Model(inputs=[ input_trans_1, input_trans_1_label, input_trans_2, input_trans_2_label, input_trans_3, input_trans_3_label, input_trans_4, input_trans_4_label, input_trans_5, input_trans_5_label, features ], outputs=[output]) model.compile(optimizer=optimizers.Adam(2.5e-4), loss={'prediction_layer': losses.binary_crossentropy}, metrics=['AUC']) return model
def xDeepFM_MTL(feature_dim_dict, embedding_size=8, hidden_size=(256, 256),
                cin_layer_size=(256, 256,), cin_split_half=True,
                task_net_size=(128,), l2_reg_linear=0.00001,
                l2_reg_embedding=0.00001, seed=1024, ):
    """
    :param feature_dim_dict: feature dictionary, containing the feature names and feature lists
    :param embedding_size:
    :param hidden_size:
    :param cin_layer_size:
    :param cin_split_half:
    :param task_net_size: layer sizes of each task-specific network
    :param l2_reg_linear:
    :param l2_reg_embedding:
    :param seed:
    :return:
    """
    # check whether the sparse / dense feature config is well formed
    check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')

    # Todo, add text sequence embedding
    deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(
        feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear,
        0.0001, seed)

    # video_input = tf.keras.layers.Input((128,))
    # inputs_list.append(video_input)

    # TODO, add other feature
    if 'txt' in feature_dim_dict:
        # txt_input = OrderedDict()
        for i, feat in enumerate(feature_dim_dict["txt"]):
            txt_input = tf.keras.layers.Input(
                shape=(feat.dimension,), name='txt_' + str(i) + '-' + feat.name)
            inputs_list.append(txt_input)

    fm_input = concat_fun(deep_emb_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(1, activation=None, )(exFM_out)

    deep_input = tf.keras.layers.Flatten()(fm_input)
    deep_out = MLP(hidden_size)(deep_input)

    finish_out = MLP(task_net_size)(deep_out)
    finish_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(finish_out)

    like_out = MLP(task_net_size)(deep_out)
    like_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(like_out)

    finish_logit = tf.keras.layers.add(
        [linear_logit, finish_logit, exFM_logit])
    like_logit = tf.keras.layers.add(
        [linear_logit, like_logit, exFM_logit])

    output_finish = PredictionLayer('sigmoid', name='finish')(finish_logit)
    output_like = PredictionLayer('sigmoid', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=[
        output_finish, output_like])
    return model
def xDeepFM_MTL( linear_feature_columns, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(256, 256), cin_layer_size=( 256, 256, ), cin_split_half=True, init_std=0.0001, l2_reg_dnn=0, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task_net_size=(128, ), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, seed=1024, ): # check_feature_config_dict(feature_dim_dict) if len(task_net_size) < 1: raise ValueError('task_net_size must be at least one layer') features = build_input_features(linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) sparse_embedding_list, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, embedding_size, l2_reg_embedding, init_std, seed) linear_logit = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std, seed=seed, prefix='linear') fm_input = concat_fun(sparse_embedding_list, axis=1) if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, 0, seed)(fm_input) exFM_logit = tf.keras.layers.Dense( 1, activation=None, )(exFM_out) dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list) deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input) like_out = DNN(task_net_size)(deep_out) like_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out) like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit]) output_like = PredictionLayer('binary', name='like')(like_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=output_like) return model
def DIN(feature_dim_dict, seq_feature_list, embedding_size=8, hist_len_max=16, dnn_use_bn=False, dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_size=(80, 40), att_activation="dice", att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary'): """Instantiates the Deep Interest Network architecture. :param feature_dim_dict: dict,to indicate sparse field (**now only support sparse feature**)like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]} :param seq_feature_list: list,to indicate sequence sparse field (**now only support sparse feature**),must be a subset of ``feature_dim_dict["sparse"]`` :param embedding_size: positive integer,sparse feature embedding_size. :param hist_len_max: positive int, to indicate the max length of seq input :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param dnn_activation: Activation function to use in deep net :param att_hidden_size: list,list of positive integer , the layer number and units in each layer of attention net :param att_activation: Activation function to use in attention net :param att_weight_normalization: bool.Whether normalize the attention score of local activation unit. :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. 
""" check_feature_config_dict(feature_dim_dict) sparse_input, dense_input, user_behavior_input = get_input( feature_dim_dict, seq_feature_list, hist_len_max) sparse_embedding_dict = { feat.name: Embedding(feat.dimension, embedding_size, embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed), embeddings_regularizer=l2(l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name, mask_zero=(feat.name in seq_feature_list)) for i, feat in enumerate(feature_dim_dict["sparse"]) } query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict['sparse'], seq_feature_list, seq_feature_list) keys_emb_list = get_embedding_vec_list(sparse_embedding_dict, user_behavior_input, feature_dim_dict['sparse'], seq_feature_list, seq_feature_list) deep_input_emb_list = get_embedding_vec_list( sparse_embedding_dict, sparse_input, feature_dim_dict['sparse'], mask_feat_list=seq_feature_list) keys_emb = concat_fun(keys_emb_list) deep_input_emb = concat_fun(deep_input_emb_list) query_emb = concat_fun(query_emb_list) hist = AttentionSequencePoolingLayer( att_hidden_size, att_activation, weight_normalization=att_weight_normalization, supports_masking=True)([query_emb, keys_emb]) deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist]) deep_input_emb = Flatten()(deep_input_emb) if len(dense_input) > 0: deep_input_emb = Concatenate()([deep_input_emb] + list(dense_input.values())) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(deep_input_emb) final_logit = Dense(1, use_bias=False)(output) output = PredictionLayer(task)(final_logit) model_input_list = get_inputs_list( [sparse_input, dense_input, user_behavior_input]) model = Model(inputs=model_input_list, outputs=output) return model
def create_model(linear_feature_columns, dnn_feature_columns,
                 fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128),
                 l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
                 seed=1024, dnn_dropout=0, dnn_activation='relu',
                 dnn_use_bn=False, task='binary'):
    K.clear_session()
    #!##########################################################################
    inputs_all = [
        # get_input_feature_layer(name='user_0', feature_shape=dense_feature_size),
        # get_input_feature_layer(name='item_0', feature_shape=dense_feature_size),
        get_input_feature_layer(name='user_1', feature_shape=dense_feature_size),
        get_input_feature_layer(name='item_1', feature_shape=dense_feature_size)
    ]
    # slotid_nettype
    # layer_user_0 = inputs_all[0]
    # layer_user_0 = K.expand_dims(layer_user_0, 1)
    # layer_item_0 = inputs_all[1]
    # layer_item_0 = K.expand_dims(layer_item_0, 1)
    layer_user_1 = inputs_all[0]
    layer_user_1 = K.expand_dims(layer_user_1, 1)
    layer_item_1 = inputs_all[1]
    layer_item_1 = K.expand_dims(layer_item_1, 1)
    # cross_emb_out0 = cross_net(layer_user_0, layer_item_0)
    cross_emb_out1 = cross_net(layer_user_1, layer_item_1)
    # cross_emb_out = tf.keras.layers.concatenate([cross_emb_out0, cross_emb_out1])
    cross_emb_out = tf.squeeze(cross_emb_out1, [1])
    #!##########################################################################
    seq_inputs_dict = get_seq_input_layers(cols=arr_name_list)
    inputs_all = inputs_all + list(seq_inputs_dict.values())  # list of input layers
    masks = tf.equal(seq_inputs_dict['task_id'], 0)

    # ordinary id sequences + label sequences
    layers2concat = []
    for index, col in enumerate(arr_name_list):
        print(col, 'get embedding!')
        emb_layer = get_emb_layer(col, trainable=TRAINABLE_DICT[col],
                                  emb_matrix=id_list_dict_emb_all[col][1])
        x = emb_layer(seq_inputs_dict[col])
        if conv1d_info_dict[col] > -1:
            cov_layer = tf.keras.layers.Conv1D(filters=conv1d_info_dict[col],
                                               kernel_size=1,
                                               activation='relu')
            x = cov_layer(x)
        layers2concat.append(x)
    x = keras.layers.concatenate(layers2concat)
    #!##########################################################################
    #!mix1
    x = trans_net(x, masks, hidden_unit=256)
    max_pool = tf.keras.layers.GlobalMaxPooling1D()
    average_pool = tf.keras.layers.GlobalAveragePooling1D()
    xmaxpool = max_pool(x)
    xmeanpool = average_pool(x)
    trans_output = tf.keras.layers.concatenate([xmaxpool, xmeanpool])
    #!##########################################################################
    #!mix2
    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed,
                                    prefix='linear', l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1))
        for k, v in group_embedding_dict.items() if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    mix = concatenate([cross_emb_out, trans_output, dnn_input], axis=-1)  # !#mix

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)
    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)
    #!##########################################################################
    model = Model(inputs=inputs_all + [features], outputs=[output])
    print(model.summary())
    return model
def DSSM(user_dnn_feature_columns, item_dnn_feature_columns, gamma=1, dnn_use_bn=True, dnn_hidden_units=(300, 300, 128), dnn_activation='tanh', l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary'): """Instantiates the Deep Structured Semantic Model architecture. :param user_dnn_feature_columns:An iterable containing user's features used by deep part of the model. :param item_dnn_feature_columns:An iterable containing item's the features used by deep part of the model. :param gamma: smoothing factor in the softmax function for DSSM :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param dnn_activation: Activation function to use in deep net :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ user_features = build_input_features(user_dnn_feature_columns) user_inputs_list = list(user_features.values()) user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns( user_features, user_dnn_feature_columns, l2_reg_embedding, init_std, seed) user_dnn_input = combined_dnn_input(user_sparse_embedding_list, user_dense_value_list) item_features = build_input_features(item_dnn_feature_columns) item_inputs_list = list(item_features.values()) item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns( item_features, item_dnn_feature_columns, l2_reg_embedding, init_std, seed) item_dnn_input = combined_dnn_input(item_sparse_embedding_list, item_dense_value_list) user_dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed, name="user_embedding")(user_dnn_input) item_dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed, name="item_embedding")(item_dnn_input) score = Cosine_Similarity(user_dnn_out, item_dnn_out, gamma=gamma) output = PredictionLayer(task, False)(score) model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output) return model
def DSSM(user_feature_columns, item_feature_columns, user_dnn_hidden_units=(64, 32), item_dnn_hidden_units=(64, 32), dnn_activation='tanh', dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, metric='cos'): """Instantiates the Deep Structured Semantic Model architecture. :param user_feature_columns: An iterable containing user's features used by the model. :param item_feature_columns: An iterable containing item's features used by the model. :param user_dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of user tower :param item_dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of item tower :param dnn_activation: Activation function to use in deep net :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param metric: str, ``"cos"`` for cosine or ``"ip"`` for inner product :return: A Keras model instance. """ embedding_matrix_dict = create_embedding_matrix(user_feature_columns + item_feature_columns, l2_reg_embedding, init_std, seed, seq_mask_zero=True) user_features = build_input_features(user_feature_columns) user_inputs_list = list(user_features.values()) user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns( user_features, user_feature_columns, l2_reg_embedding, init_std, seed, embedding_matrix_dict=embedding_matrix_dict) user_dnn_input = combined_dnn_input(user_sparse_embedding_list, user_dense_value_list) item_features = build_input_features(item_feature_columns) item_inputs_list = list(item_features.values()) item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns( item_features, item_feature_columns, l2_reg_embedding, init_std, seed, embedding_matrix_dict=embedding_matrix_dict) item_dnn_input = combined_dnn_input(item_sparse_embedding_list, item_dense_value_list) user_dnn_out = DNN( user_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed, )(user_dnn_input) item_dnn_out = DNN(item_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(item_dnn_input) score = Similarity(type=metric)([user_dnn_out, item_dnn_out]) output = PredictionLayer("binary", False)(score) model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output) model.__setattr__("user_input", user_inputs_list) model.__setattr__("item_input", item_inputs_list) model.__setattr__("user_embedding", user_dnn_out) model.__setattr__("item_embedding", item_dnn_out) return model
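# Hedged usage sketch for the two-tower DSSM above: the attributes attached with
# __setattr__ let each tower be cut out as its own encoder for retrieval-style
# inference. SparseFeat's import path and signature vary across DeepCTR versions
# (older releases expose it from deepctr.inputs), and the feature names and
# vocabulary sizes below are made up for illustration.
import numpy as np
from tensorflow.keras.models import Model
from deepctr.feature_column import SparseFeat

user_cols = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8)]
item_cols = [SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8)]

model = DSSM(user_cols, item_cols)
model.compile(optimizer='adam', loss='binary_crossentropy')

user_encoder = Model(inputs=model.user_input, outputs=model.user_embedding)
item_encoder = Model(inputs=model.item_input, outputs=model.item_embedding)
user_vecs = user_encoder.predict({'user_id': np.array([1, 2, 3])})
item_vecs = item_encoder.predict({'item_id': np.array([10, 20, 30])})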
def xDeepFM_MTL(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256), task_net_size=(128, ), cin_layer_size=( 128, 128, ), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, l2_reg_cin=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'): """Instantiates the xDeepFM architecture. :param linear_feature_columns: An iterable containing all the features used by linear part of the model. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network :param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit :param cin_activation: activation function used on feature maps :param l2_reg_linear: float. L2 regularizer strength applied to linear part :param l2_reg_embedding: L2 regularizer strength applied to embedding vector :param l2_reg_dnn: L2 regularizer strength applied to deep net :param l2_reg_cin: L2 regularizer strength applied to CIN. :param seed: integer ,to use as random seed. :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_activation: Activation function to use in DNN :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ features = build_input_features(linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear) sparse_embedding_list, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, l2_reg_embedding, seed) fm_input = concat_func(sparse_embedding_list, axis=1) if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half, l2_reg_cin, seed)(fm_input) exFM_logit = tf.keras.layers.Dense( 1, kernel_initializer=tf.keras.initializers.glorot_normal(seed))( exFM_out) dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list) dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input) finish_out = DNN(task_net_size)(dnn_output) finish_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out) like_out = DNN(task_net_size)(dnn_output) like_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out) finish_logit = tf.keras.layers.add( [linear_logit, finish_logit, exFM_logit]) like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit]) output_finish = PredictionLayer('binary', name='finish')(finish_logit) output_like = PredictionLayer('binary', name='like')(like_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=[output_finish, output_like]) return model
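# Hedged training sketch for the two-head xDeepFM_MTL above: one binary loss per
# named output ('finish', 'like') with illustrative loss weights. The feature
# columns are assumptions (DeepCTR-style SparseFeat; import path varies by version).
import tensorflow as tf
from deepctr.feature_column import SparseFeat

feat_cols = [SparseFeat('uid', vocabulary_size=1000, embedding_dim=8),
             SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8)]

model = xDeepFM_MTL(linear_feature_columns=feat_cols, dnn_feature_columns=feat_cols)
model.compile(optimizer='adagrad',
              loss={'finish': 'binary_crossentropy', 'like': 'binary_crossentropy'},
              loss_weights={'finish': 0.7, 'like': 0.3},
              metrics=[tf.keras.metrics.AUC(name='auc')])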
def create_model(linear_feature_columns, dnn_feature_columns, fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'): K.clear_session() #!################################################################################################################ inputs_all = [ get_input_feature_layer(name='slotid_nettype', feature_shape=dense_feature_size) ] # slotid_nettype layer_slotid_nettype = inputs_all[0] layer_slotid_nettype = K.expand_dims(layer_slotid_nettype, 1) #!################################################################################################################ # seq_inputs_dict = get_cross_seq_input_layers(cols=cross_arr_name_list) # inputs_all = inputs_all + list(seq_inputs_dict.values()) # list of input layers, used for crossing # cross_emb_out = [] # last_col = '' # for index, col in enumerate(cross_arr_name_list): # # print(col, 'get embedding!') # emb_layer = get_emb_layer( # col, trainable=False, emb_matrix=dict_cross_emb_all[col]) # x = emb_layer(inputs_all[1+index]) # if col.split('_')[-1] == 'i': # cross_user_item_i = x # last_col = col # continue # else: # print(f'crossing net add {last_col} and {col}') # cross_emb_out.append( # cross_net(cross_user_item_i, x, layer_slotid_nettype, hidden_unit=4)) # cross_emb_out = tf.keras.layers.concatenate(cross_emb_out) # cross_emb_out = tf.squeeze(cross_emb_out, [1]) #!################################################################################################################ # seq_inputs_dict = get_seq_input_layers(cols=arr_name_list) # inputs_all = inputs_all+list(seq_inputs_dict.values()) # list of input layers # masks = tf.equal(seq_inputs_dict['task_id'], 0) # # plain sequences + label sequence # layers2concat = [] # for index, col in enumerate(arr_name_list): # print(col, 'get embedding!') # emb_layer = get_emb_layer( # col, trainable=TRAINABLE_DICT[col], emb_matrix=id_list_dict_emb_all[col][1]) # x = emb_layer(seq_inputs_dict[col]) # if conv1d_info_dict[col] > -1: # cov_layer = tf.keras.layers.Conv1D(filters=conv1d_info_dict[col], # kernel_size=1, # activation='relu') # x = cov_layer(x) # layers2concat.append(x) # x = tf.keras.layers.concatenate(layers2concat) #!################################################################################################################ #!mix1 # x = trans_net(x, masks, hidden_unit=256) # max_pool = tf.keras.layers.GlobalMaxPooling1D() # average_pool = tf.keras.layers.GlobalAveragePooling1D() # xmaxpool = max_pool(x) # xmeanpool = average_pool(x) # trans_output = tf.keras.layers.concatenate([xmaxpool, xmeanpool]) #!################################################################################################################ #!mix2 features = build_input_features(linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear) group_embedding_dict, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, l2_reg_embedding, seed, support_group=True) fm_logit = add_func([ FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items() if k in fm_group ]) dnn_input = combined_dnn_input( list(chain.from_iterable(group_embedding_dict.values())), dense_value_list) # mix = concatenate([cross_emb_out, trans_output, # dnn_input], axis=-1) # !#mix mix = dnn_input dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(mix) dnn_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(dnn_output) final_logit = add_func([linear_logit, fm_logit, dnn_logit]) output = PredictionLayer(task)(final_logit) #!################################################################################################################ # model = Model(inputs=inputs_all+[features], model = Model(inputs=inputs_list, outputs=[output]) print(model.summary()) return model
def DeepFM(linear_feature_columns, dnn_feature_columns, fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', use_image=False, use_text=False, embedding_size=128): """Instantiates the DeepFM Network architecture. :param linear_feature_columns: An iterable containing all the features used by linear part of the model. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param fm_group: list, group_name of features that will be used to do feature interactions. :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN :param l2_reg_linear: float. L2 regularizer strength applied to linear part :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_activation: Activation function to use in DNN :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ train_path = '../data/underexpose_train' features = build_input_features(linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) group_embedding_dict, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, l2_reg_embedding, init_std, seed, support_group=True) if use_image: video_input = tf.keras.layers.Input(shape=(128, ), name='image') video_emb = tf.keras.layers.Dense( embedding_size, use_bias=False, kernel_regularizer=l2(l2_reg_embedding))(video_input) video_emb = tf.keras.layers.Reshape( (1, embedding_size), input_shape=(embedding_size, ))(video_emb) group_embedding_dict[DEFAULT_GROUP_NAME].append(video_emb) inputs_list.append(video_input) if use_text: audio_input = tf.keras.layers.Input(shape=(128, ), name='text') audio_emb = tf.keras.layers.Dense( embedding_size, use_bias=False, kernel_regularizer=l2(l2_reg_embedding))(audio_input) audio_emb = tf.keras.layers.Reshape( (1, embedding_size), input_shape=(embedding_size, ))(audio_emb) group_embedding_dict[DEFAULT_GROUP_NAME].append(audio_emb) inputs_list.append(audio_input) linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear', l2_reg=l2_reg_linear) fm_logit = add_func([ FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items() if k in fm_group ]) dnn_input = combined_dnn_input( list(chain.from_iterable(group_embedding_dict.values())), dense_value_list) dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input) dnn_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(dnn_output) final_logit = add_func([linear_logit, fm_logit, dnn_logit]) output = PredictionLayer(task)(final_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=output) return model
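# Hedged sketch: with use_image/use_text enabled, the DeepFM above adds two extra
# 128-dim inputs named 'image' and 'text', so the feed dict passed to fit() needs
# matching arrays. Feature columns, names and sizes are illustrative assumptions
# (SparseFeat import path and signature vary across DeepCTR versions).
import numpy as np
from deepctr.feature_column import SparseFeat

feat_cols = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
             SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8)]

model = DeepFM(feat_cols, feat_cols, use_image=True, use_text=True, embedding_size=128)
model.compile(optimizer='adam', loss='binary_crossentropy')

n = 32
x = {'user_id': np.random.randint(0, 1000, n),
     'item_id': np.random.randint(0, 5000, n),
     'image': np.random.random((n, 128)).astype('float32'),
     'text': np.random.random((n, 128)).astype('float32')}
y = np.random.randint(0, 2, n)
model.fit(x, y, batch_size=16, epochs=1)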
def DSIN( feature_dim_dict, sess_feature_list, embedding_size=8, sess_max_count=5, sess_len_max=10, att_embedding_size=1, att_head_num=8, dnn_hidden_units=(200, 80), dnn_activation='sigmoid', l2_reg_dnn=0, l2_reg_embedding=1e-6, task='binary', dnn_dropout=0, init_std=0.0001, seed=1024, encoding='bias', ): check_feature_config_dict(feature_dim_dict) print( 'sess_count', sess_max_count, 'encoding', encoding, ) sparse_input, dense_input, user_behavior_input_dict, _, user_sess_length = get_input( feature_dim_dict, sess_feature_list, sess_max_count, sess_len_max) sparse_embedding_dict = { feat.name: Embedding(feat.dimension, embedding_size, embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed), embeddings_regularizer=l2(l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name, mask_zero=(feat.name in sess_feature_list)) for i, feat in enumerate(feature_dim_dict["sparse"]) } query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], sess_feature_list, sess_feature_list) query_emb = concat_fun(query_emb_list) deep_input_emb_list = get_embedding_vec_list( sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], mask_feat_list=sess_feature_list) deep_input_emb = concat_fun(deep_input_emb_list) deep_input_emb = Flatten()(NoMask()(deep_input_emb)) be_flag = True if encoding == 'bias' else False tr_input = sess_interest_division(sparse_embedding_dict, user_behavior_input_dict, feature_dim_dict['sparse'], sess_feature_list, sess_max_count, bias_encoding=be_flag) Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False, use_positional_encoding=(not be_flag), seed=seed, supports_masking=True, blinding=True) sess_fea = sess_interest_extractor(tr_input, sess_max_count, Self_Attention) interest_attention_layer = AttentionSequencePoolingLayer( att_hidden_units=(64, 16), weight_normalization=True, supports_masking=False)([query_emb, sess_fea, user_sess_length]) lstm_outputs = BiLSTM( len(sess_feature_list) * embedding_size, layers=2, res_layers=0, dropout_rate=0.2, )(sess_fea) lstm_attention_layer = AttentionSequencePoolingLayer( att_hidden_units=(64, 16), weight_normalization=True)([query_emb, lstm_outputs, user_sess_length]) deep_input_emb = Concatenate()([ deep_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer) ]) if len(dense_input) > 0: deep_input_emb = Concatenate()([deep_input_emb] + list(dense_input.values())) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed)(deep_input_emb) output = Dense(1, use_bias=False, activation=None)(output) output = PredictionLayer(task)(output) sess_input_list = [] #sess_input_length_list = [] for i in range(sess_max_count): sess_name = "sess_" + str(i) sess_input_list.extend( get_inputs_list([user_behavior_input_dict[sess_name]])) #sess_input_length_list.append(user_behavior_length_dict[sess_name]) model_input_list = get_inputs_list( [sparse_input, dense_input]) + sess_input_list + [user_sess_length] model = Model(inputs=model_input_list, outputs=output) return model
def DSIN( feature_dim_dict, sess_feature_list, embedding_size=8, sess_max_count=5, sess_len_max=10, bias_encoding=False, att_embedding_size=1, att_head_num=8, dnn_hidden_units=(200, 80), dnn_activation='sigmoid', dnn_dropout=0, dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, init_std=0.0001, seed=1024, task='binary', ): """Instantiates the Deep Session Interest Network architecture. :param feature_dim_dict: dict,to indicate sparse field (**now only support sparse feature**)like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]} :param sess_feature_list: list,to indicate session feature sparse field (**now only support sparse feature**),must be a subset of ``feature_dim_dict["sparse"]`` :param embedding_size: positive integer,sparse feature embedding_size. :param sess_max_count: positive int, to indicate the max number of sessions :param sess_len_max: positive int, to indicate the max length of each session :param bias_encoding: bool. Whether use bias encoding or positional encoding :param att_embedding_size: positive int, the embedding size of each attention head :param att_head_num: positive int, the number of attention head :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param dnn_activation: Activation function to use in deep net :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance.
""" check_feature_config_dict(feature_dim_dict) if (att_embedding_size * att_head_num != len(sess_feature_list) * embedding_size): raise ValueError( "len(session_feature_lsit) * embedding_size must equal to att_embedding_size * att_head_num ,got %d * %d != %d *%d" % (len(sess_feature_list), embedding_size, att_embedding_size, att_head_num)) sparse_input, dense_input, user_behavior_input_dict, _, user_sess_length = get_input( feature_dim_dict, sess_feature_list, sess_max_count, sess_len_max) sparse_embedding_dict = { feat.name: Embedding(feat.dimension, embedding_size, embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed), embeddings_regularizer=l2(l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name, mask_zero=(feat.name in sess_feature_list)) for i, feat in enumerate(feature_dim_dict["sparse"]) } query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], sess_feature_list, sess_feature_list) query_emb = concat_fun(query_emb_list) deep_input_emb_list = get_embedding_vec_list( sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], mask_feat_list=sess_feature_list) deep_input_emb = concat_fun(deep_input_emb_list) deep_input_emb = Flatten()(NoMask()(deep_input_emb)) tr_input = sess_interest_division(sparse_embedding_dict, user_behavior_input_dict, feature_dim_dict['sparse'], sess_feature_list, sess_max_count, bias_encoding=bias_encoding) Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False, use_positional_encoding=(not bias_encoding), seed=seed, supports_masking=True, blinding=True) sess_fea = sess_interest_extractor(tr_input, sess_max_count, Self_Attention) interest_attention_layer = AttentionSequencePoolingLayer( att_hidden_units=(64, 16), weight_normalization=True, supports_masking=False)([query_emb, sess_fea, user_sess_length]) lstm_outputs = BiLSTM( len(sess_feature_list) * embedding_size, layers=2, res_layers=0, dropout_rate=0.2, )(sess_fea) lstm_attention_layer = AttentionSequencePoolingLayer( att_hidden_units=(64, 16), weight_normalization=True)([query_emb, lstm_outputs, user_sess_length]) deep_input_emb = Concatenate()([ deep_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer) ]) if len(dense_input) > 0: deep_input_emb = Concatenate()([deep_input_emb] + list(dense_input.values())) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(deep_input_emb) output = Dense(1, use_bias=False, activation=None)(output) output = PredictionLayer(task)(output) sess_input_list = [] # sess_input_length_list = [] for i in range(sess_max_count): sess_name = "sess_" + str(i) sess_input_list.extend( get_inputs_list([user_behavior_input_dict[sess_name]])) # sess_input_length_list.append(user_behavior_length_dict[sess_name]) model_input_list = get_inputs_list( [sparse_input, dense_input]) + sess_input_list + [user_sess_length] model = Model(inputs=model_input_list, outputs=output) return model
def DeepFM(linear_feature_columns, dnn_feature_columns, embedding_size=8, use_fm=True, only_dnn=False, dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'): """Instantiates the DeepFM Network architecture. :param linear_feature_columns: An iterable containing all the features used by linear part of the model. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param embedding_size: positive integer,sparse feature embedding_size :param use_fm: bool,use FM part or not :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN :param l2_reg_linear: float. L2 regularizer strength applied to linear part :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_activation: Activation function to use in DNN :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ ## create an Input[1,] for every feature; features ==> {'feature1': Input[1,], ...} features = build_input_features(linear_feature_columns + dnn_feature_columns) ## [Input1, Input2, ... ] inputs_list = list(features.values()) sparse_embedding_list, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, embedding_size, l2_reg_embedding, init_std, seed) ## [embedding layer for feature_1, connected to feature_1's Input[1,] layer, ...], [Input[1,] layer for feature_1, ...]
linear_logit = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std, seed=seed, prefix='linear') # linear_logit_finish = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std, # seed=seed, prefix='linear_finish') # linear_logit_like = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std, # seed=seed, prefix='linear_like') ## linear transformation layer, no activation function fm_input = concat_fun(sparse_embedding_list, axis=1) ## sparse embedding layers concatenated together fm_logit = FM()(fm_input) # fm_logit_finish = FM()(fm_input) # fm_logit_like = FM()(fm_input) ## second-order FM output, excluding the first-order term and bias dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list) # dnn_out = Dense(128, dnn_activation, l2_reg_dnn, dnn_dropout, # dnn_use_bn, seed)(dnn_input) dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input) mmoe_out = MMoE(units=16, num_experts=8, num_tasks=2)(dnn_out) [finish_in, like_in] = mmoe_out finish_out_1 = Dense(128, dnn_activation, kernel_regularizer=l2(l2_reg_dnn))(finish_in) finish_out = Dense(128, dnn_activation, kernel_regularizer=l2(l2_reg_dnn))(finish_out_1) finish_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out) like_out_1 = Dense(128, dnn_activation, kernel_regularizer=l2(l2_reg_dnn))(like_in) like_out = Dense(128, dnn_activation, kernel_regularizer=l2(l2_reg_dnn))(like_out_1) # finish_logit_stop_grad = Lambda(lambda x: stop_gradient(x))(finish_out) # like_out_finish = concat_fun([like_out, finish_logit_stop_grad]) like_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out) dnn_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(dnn_out) # if len(dnn_hidden_units) > 0 and only_dnn == True: # final_logit = dnn_logit # elif len(dnn_hidden_units) == 0 and use_fm == False: # only linear # final_logit = linear_logit # elif len(dnn_hidden_units) == 0 and use_fm == True: # linear + FM # final_logit = tf.keras.layers.add([linear_logit, fm_logit]) # elif len(dnn_hidden_units) > 0 and use_fm == False: # linear + Deep # final_logit = tf.keras.layers.add([linear_logit, dnn_logit]) # elif len(dnn_hidden_units) > 0 and use_fm == True: # linear + FM + Deep # final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit]) # else: # raise NotImplementedError finish_logit = tf.keras.layers.add([linear_logit, fm_logit, finish_logit]) like_logit = tf.keras.layers.add([linear_logit, fm_logit, like_logit]) output_finish = PredictionLayer('binary', name='finish')(finish_logit) output_like = PredictionLayer('binary', name='like')(like_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=[output_finish, output_like]) return model
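# The MMoE layer used above is not defined in this file. Below is one plausible,
# minimal sketch of a Multi-gate Mixture-of-Experts layer with the same interface
# (units, num_experts, num_tasks; returns a list of num_tasks tensors) -- an
# assumption for illustration, not the author's actual implementation.
import tensorflow as tf

class MMoE(tf.keras.layers.Layer):
    def __init__(self, units, num_experts, num_tasks, **kwargs):
        super(MMoE, self).__init__(**kwargs)
        self.units = units
        self.num_experts = num_experts
        self.num_tasks = num_tasks

    def build(self, input_shape):
        input_dim = int(input_shape[-1])
        # one (input_dim, units) kernel per expert, stacked along the last axis
        self.expert_kernels = self.add_weight(
            name='expert_kernels', shape=(input_dim, self.units, self.num_experts),
            initializer='glorot_uniform')
        # one softmax gate over the experts per task
        self.gate_kernels = [self.add_weight(
            name='gate_kernel_%d' % i, shape=(input_dim, self.num_experts),
            initializer='glorot_uniform') for i in range(self.num_tasks)]
        super(MMoE, self).build(input_shape)

    def call(self, inputs):
        # (batch, units, num_experts)
        expert_outputs = tf.nn.relu(tf.tensordot(inputs, self.expert_kernels, axes=1))
        task_outputs = []
        for gate_kernel in self.gate_kernels:
            gate = tf.nn.softmax(tf.matmul(inputs, gate_kernel))       # (batch, num_experts)
            weighted = expert_outputs * tf.expand_dims(gate, axis=1)   # broadcast over units
            task_outputs.append(tf.reduce_sum(weighted, axis=-1))      # (batch, units)
        return task_outputs

# e.g. [finish_in, like_in] = MMoE(units=16, num_experts=8, num_tasks=2)(dnn_out)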
def xDeepFM_MTL( feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=( 256, 256, ), cin_split_half=True, task_net_size=(128, ), l2_reg_linear=0.000001, l2_reg_embedding=0.000001, seed=1024, ): check_feature_config_dict(feature_dim_dict) # unknown if len(task_net_size) < 1: raise ValueError('task_net_size must be at least one layer') deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding( feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear, 0.0001, seed) # video_input = tf.keras.layers.Input((128,)) # inputs_list.append(video_input) fm_input = concat_fun(deep_emb_list, axis=1) # model input ''' Build the CIN. By default the CIN layer sizes are [256, 256] with relu activation; the input is (batch_size, field_size, embedding_size) and the output is (batch_size, feature_num). If split_half is True, only half of the feature maps in each hidden layer are connected to the output unit. ''' if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, seed)(fm_input) exFM_logit = tf.keras.layers.Dense( 1, activation=None, )(exFM_out) # fully connected output to the output unit ''' Flatten collapses every dimension except the batch dimension, giving an output of (batch_size, sum_size); the embedding features are fed directly into the MLP. ''' deep_input = tf.keras.layers.Flatten()(fm_input) deep_out = MLP(hidden_size)(deep_input) ''' Pass deep_out through an MLP and fully connect it to the finish logit output; the same operation is applied to the like logit output. ''' finish_out = MLP(task_net_size)(deep_out) finish_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(finish_out) like_out = MLP(task_net_size)(deep_out) like_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(like_out) ''' The final finish logit is the sum of linear_logit, finish_logit (resp. like_logit) and exFM_logit. ''' finish_logit = tf.keras.layers.add( [linear_logit, finish_logit, exFM_logit]) like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit]) ''' Convert the logits to probabilities with a sigmoid, and build the model from the inputs and outputs. ''' output_finish = PredictionLayer('sigmoid', name='finish')(finish_logit) output_like = PredictionLayer('sigmoid', name='like')(like_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=[output_finish, output_like]) return model
def DeepFM(linear_feature_columns, dnn_feature_columns, embedding_size=8, use_fm=True, use_only_dnn=False, dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'): """Instantiates the DeepFM Network architecture. :param linear_feature_columns: An iterable containing all the features used by linear part of the model. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param embedding_size: positive integer,sparse feature embedding_size :param use_fm: bool,use FM part or not :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN :param l2_reg_linear: float. L2 regularizer strength applied to linear part :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_activation: Activation function to use in DNN :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ features = build_input_features(linear_feature_columns + dnn_feature_columns) inputs_list = list(features.values()) sparse_embedding_list, dense_value_list = input_from_feature_columns( features, dnn_feature_columns, embedding_size, l2_reg_embedding, init_std, seed) linear_logit = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std, seed=seed, prefix='linear') fm_input = concat_fun(sparse_embedding_list, axis=1) fm_logit = FM()(fm_input) dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list) dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(dnn_input) dnn_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(dnn_out) if use_only_dnn == True: final_logit = dnn_logit elif len(dnn_hidden_units) == 0 and use_fm == False: # only linear final_logit = linear_logit elif len(dnn_hidden_units) == 0 and use_fm == True: # linear + FM final_logit = tf.keras.layers.add([linear_logit, fm_logit]) elif len(dnn_hidden_units) > 0 and use_fm == False: # linear + Deep final_logit = tf.keras.layers.add([linear_logit, dnn_logit]) elif len(dnn_hidden_units) > 0 and use_fm == True: # linear + FM + Deep final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit]) else: raise NotImplementedError output = PredictionLayer(task)(final_logit) model = tf.keras.models.Model(inputs=inputs_list, outputs=output) return model
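# Hedged sketch of the ablations the flags above enable. This assumes an older
# DeepCTR release where SparseFeat takes (name, dimension) and embedding_size is a
# model argument (matching the input_from_feature_columns signature used above);
# names and sizes are illustrative.
from deepctr.inputs import SparseFeat  # import path differs in newer DeepCTR versions

cols = [SparseFeat('user_id', 1000), SparseFeat('item_id', 5000)]

lr_model = DeepFM(cols, cols, dnn_hidden_units=(), use_fm=False)   # linear only
fm_model = DeepFM(cols, cols, dnn_hidden_units=(), use_fm=True)    # linear + FM
full_model = DeepFM(cols, cols, dnn_hidden_units=(128, 128))       # linear + FM + DNN
dnn_model = DeepFM(cols, cols, use_only_dnn=True)                  # DNN logit only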
def DIEN(feature_dim_dict, seq_feature_list, embedding_size=8, hist_len_max=16, gru_type="GRU", use_negsampling=False, alpha=1.0, use_bn=False, dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_units=(64, 16), att_activation="dice", att_weight_normalization=True, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary'): """Instantiates the Deep Interest Evolution Network architecture. :param feature_dim_dict: dict,to indicate sparse field (**now only support sparse feature**)like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]} :param seq_feature_list: list,to indicate sequence sparse field (**now only support sparse feature**),must be a subset of ``feature_dim_dict["sparse"]`` :param embedding_size: positive integer,sparse feature embedding_size. :param hist_len_max: positive int, to indicate the max length of seq input :param gru_type: str,can be GRU AIGRU AUGRU AGRU :param use_negsampling: bool, whether or not to use negative sampling :param alpha: float ,weight of auxiliary_loss :param use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN :param dnn_activation: Activation function to use in DNN :param att_hidden_units: list,list of positive integer , the layer number and units in each layer of attention net :param att_activation: Activation function to use in attention net :param att_weight_normalization: bool.Whether normalize the attention score of local activation unit. :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance.
""" check_feature_config_dict(feature_dim_dict) sparse_input, dense_input, user_behavior_input, user_behavior_length = get_input( feature_dim_dict, seq_feature_list, hist_len_max) sparse_embedding_dict = {feat.name: Embedding(feat.dimension, embedding_size, embeddings_initializer=RandomNormal( mean=0.0, stddev=init_std, seed=seed), embeddings_regularizer=l2( l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name) for i, feat in enumerate(feature_dim_dict["sparse"])} query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], return_feat_list=seq_feature_list) keys_emb_list = get_embedding_vec_list(sparse_embedding_dict, user_behavior_input, feature_dim_dict['sparse'], return_feat_list=seq_feature_list) deep_input_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict['sparse']) query_emb = concat_fun(query_emb_list) keys_emb = concat_fun(keys_emb_list) deep_input_emb = concat_fun(deep_input_emb_list) if use_negsampling: neg_user_behavior_input = OrderedDict() for i, feat in enumerate(seq_feature_list): neg_user_behavior_input[feat] = Input(shape=(hist_len_max,), name='neg_seq_' + str(i) + '-' + feat) neg_uiseq_embed_list = get_embedding_vec_list(sparse_embedding_dict, neg_user_behavior_input, feature_dim_dict["sparse"], seq_feature_list, ) # [sparse_embedding_dict[feat]( # neg_user_behavior_input[feat]) for feat in seq_feature_list] neg_concat_behavior = concat_fun(neg_uiseq_embed_list) else: neg_concat_behavior = None hist, aux_loss_1 = interest_evolution(keys_emb, query_emb, user_behavior_length, gru_type=gru_type, use_neg=use_negsampling, neg_concat_behavior=neg_concat_behavior, embedding_size=embedding_size, att_hidden_size=att_hidden_units, att_activation=att_activation, att_weight_normalization=att_weight_normalization, ) deep_input_emb = Concatenate()([deep_input_emb, hist]) deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb) if len(dense_input) > 0: deep_input_emb = Concatenate()( [deep_input_emb] + list(dense_input.values())) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, use_bn, seed)(deep_input_emb) final_logit = Dense(1, use_bias=False)(output) output = PredictionLayer(task)(final_logit) model_input_list = get_inputs_list( [sparse_input, dense_input, user_behavior_input]) if use_negsampling: model_input_list += list(neg_user_behavior_input.values()) model_input_list += [user_behavior_length] model = tf.keras.models.Model(inputs=model_input_list, outputs=output) if use_negsampling: model.add_loss(alpha * aux_loss_1) tf.keras.backend.get_session().run(tf.global_variables_initializer()) return model