class WDL(object):
    """Wide & Deep (WDL) recommender.

    Sums the linear ("wide") logit produced by the shared FM feature
    builder with a small DNN ("deep") logit computed over the sparse
    embeddings, then applies a sigmoid for binary prediction.
    """

    def __init__(self, user_num=4691, movie_num=2514, year_num=76,
                 genre_num=9, embedding_size=16):
        self.user_num = user_num
        self.movie_num = movie_num
        self.year_num = year_num
        self.genre_num = genre_num
        self.embedding_size = embedding_size
        # The FM helper supplies the model inputs, the linear (wide) logit
        # and the per-field sparse embeddings.
        self.fm = FM(user_num=self.user_num,
                     movie_num=self.movie_num,
                     year_num=self.year_num,
                     genre_num=self.genre_num,
                     embedding_size=self.embedding_size)

    def get_deep_logit(self, sparse_embedding):
        """Deep side: flatten the concatenated embeddings and run a 128-128 MLP.

        :param sparse_embedding: list of per-field embedding tensors
        :return: a single unbiased logit tensor
        """
        hidden = Flatten()(Concatenate(axis=2)(sparse_embedding))
        for _ in range(2):
            hidden = Dense(128, activation="relu")(hidden)
        return Dense(1, use_bias=False)(hidden)

    def get_wdl_model(self):
        """Assemble and return the full wide & deep Keras model."""
        # Wide side: linear logit straight from the FM builder.
        wide_logit = self.fm.get_linear_logit()
        # Deep side: DNN logit over the sparse embeddings.
        embeddings = self.fm.get_sparse_embedding()
        deep_logit = self.get_deep_logit(embeddings)
        combined = Add()([wide_logit, deep_logit])
        outputs = Dense(1, activation="sigmoid")(combined)
        return Model(inputs=self.fm.inputs, outputs=outputs)
class AFM(object):
    """Attentional Factorization Machine (AFM).

    Adds the FM linear logit to an attention-weighted pairwise-interaction
    logit produced by ``AFMLayer``, then applies a sigmoid.
    """

    def __init__(self, user_num=4691, movie_num=2514, year_num=76,
                 genre_num=9, embedding_size=16, attention_fator=16):
        """
        :param user_num: vocabulary size of the user feature
        :param movie_num: vocabulary size of the movie feature
        :param year_num: vocabulary size of the year feature
        :param genre_num: vocabulary size of the genre feature
        :param embedding_size: width of each field embedding
        :param attention_fator: number of units in the attention dense layer
            (NOTE(review): name misspells "factor" but is kept for
            backward compatibility with keyword callers)
        """
        self.user_num = user_num
        self.movie_num = movie_num
        self.year_num = year_num
        self.genre_num = genre_num
        self.embedding_size = embedding_size
        self.attention_fator = attention_fator
        # Shared FM builder for inputs, linear logit and embeddings.
        self.fm = FM(user_num=self.user_num,
                     movie_num=self.movie_num,
                     year_num=self.year_num,
                     genre_num=self.genre_num,
                     embedding_size=self.embedding_size)

    def get_afm_logit(self, sparse_embedding):
        """Attention-weighted pairwise-interaction logit from AFMLayer."""
        return AFMLayer(self.attention_fator)(sparse_embedding)

    def get_afm_model(self):
        """Assemble and return the full AFM Keras model."""
        wide_logit = self.fm.get_linear_logit()
        embeddings = self.fm.get_sparse_embedding()
        interaction_logit = self.get_afm_logit(embeddings)
        combined = Add()([wide_logit, interaction_logit])
        outputs = Dense(1, activation="sigmoid")(combined)
        return Model(inputs=self.fm.inputs, outputs=outputs)
class AutoInt(object):
    """AutoInt recommender.

    Stacks multi-head self-attention layers over the field embeddings,
    concatenates the attention output with a parallel 128-128 DNN branch,
    adds the FM linear logit, and finishes with a sigmoid.
    """

    def __init__(self, user_num=4691, movie_num=2514, year_num=76,
                 genre_num=9, embedding_size=16, att_embedding_size=16,
                 head_num=2, att_layer_num=3, use_res=True):
        self.user_num = user_num
        self.movie_num = movie_num
        self.year_num = year_num
        self.genre_num = genre_num
        self.embedding_size = embedding_size
        self.att_layer_num = att_layer_num          # number of stacked attention layers
        self.att_embedding_size = att_embedding_size  # per-head attention width
        self.head_num = head_num                    # attention heads per layer
        self.use_res = use_res                      # residual connection flag
        # Shared FM builder for inputs, linear logit and embeddings.
        self.fm = FM(user_num=self.user_num,
                     movie_num=self.movie_num,
                     year_num=self.year_num,
                     genre_num=self.genre_num,
                     embedding_size=self.embedding_size)

    def get_deep_output(self, inputs):
        """Plain DNN branch: flatten then two ReLU layers of 128 units."""
        hidden = Flatten()(inputs)
        hidden = Dense(128, activation="relu")(hidden)
        return Dense(128, activation="relu")(hidden)

    def get_transformer_logit(self, sparse_embedding):
        """Attention + DNN logit over the concatenated field embeddings.

        :param sparse_embedding: list of per-field embedding tensors
        :return: a single unbiased logit tensor
        """
        stacked = Concatenate(axis=1)(sparse_embedding)
        att = stacked
        # NOTE: the layer class name misspells "Multi"; kept as defined elsewhere.
        for _ in range(self.att_layer_num):
            att = MutiHeadSelfAttention(self.att_embedding_size,
                                        self.head_num,
                                        self.use_res)(att)
        att_output = Flatten()(att)
        deep_output = self.get_deep_output(stacked)
        merged = Concatenate()([att_output, deep_output])
        return Dense(1, use_bias=False)(merged)

    def get_autoint_model(self):
        """Assemble and return the full AutoInt Keras model."""
        wide_logit = self.fm.get_linear_logit()
        embeddings = self.fm.get_sparse_embedding()
        cross_logit = self.get_transformer_logit(embeddings)
        combined = Add()([wide_logit, cross_logit])
        output = Dense(1, activation="sigmoid")(combined)
        return Model(inputs=self.fm.inputs, outputs=output)
class DCN(object):
    """Deep & Cross Network (DCN).

    Runs a DNN branch and an explicit feature-crossing branch
    (``CrossLayer``) in parallel over the flattened embeddings,
    concatenates them into one logit, adds the FM linear logit,
    and applies a sigmoid.
    """

    def __init__(self, user_num=4691, movie_num=2514, year_num=76,
                 genre_num=9, embedding_size=16,
                 dnn_hidden_units=(128, 128,), cross_layer_num=2):
        self.user_num = user_num
        self.movie_num = movie_num
        self.year_num = year_num
        self.genre_num = genre_num
        self.embedding_size = embedding_size
        self.dnn_hidden_units = dnn_hidden_units  # widths of the DNN branch layers
        self.cross_layer_num = cross_layer_num    # depth of the cross branch
        # Shared FM builder for inputs, linear logit and embeddings.
        self.fm = FM(user_num=self.user_num,
                     movie_num=self.movie_num,
                     year_num=self.year_num,
                     genre_num=self.genre_num,
                     embedding_size=self.embedding_size)

    def get_deep_output(self, sparse_embedding):
        """DNN branch; returns the hidden representation (no Dense(1) here)."""
        hidden = Flatten()(Concatenate(axis=2)(sparse_embedding))
        for width in self.dnn_hidden_units:
            hidden = Dense(width, activation="relu")(hidden)
        return hidden

    def get_cross_output(self, sparse_embedding):
        """Explicit feature-crossing branch via CrossLayer."""
        flat = Flatten()(Concatenate(axis=2)(sparse_embedding))
        return CrossLayer(cross_layer_num=self.cross_layer_num)(flat)

    def get_dcn_model(self):
        """Assemble and return the full DCN Keras model."""
        wide_logit = self.fm.get_linear_logit()
        embeddings = self.fm.get_sparse_embedding()
        deep_output = self.get_deep_output(embeddings)
        cross_output = self.get_cross_output(embeddings)
        merged = Concatenate(axis=-1)([deep_output, cross_output])
        cross_logit = Dense(1)(merged)
        combined = Add()([wide_logit, cross_logit])
        output = Dense(1, activation="sigmoid")(combined)
        return Model(inputs=self.fm.inputs, outputs=output)