def __init__(self, feature_metas, embedding_dim=8, hidden_units=(128, 64), use_dnn=True, linear_bias=False):
    super(AFM, self).__init__()
    self.feature_metas = feature_metas
    self.linear_model = LinearModel(feature_metas, linear_bias=linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    self.att_pool = AttentionPooling(feature_metas, embedding_dim=embedding_dim, attention_factor=4)
    # Attention pooling keeps the embedding dimension, so the DNN input size is embedding_dim.
    self.dnn = MLP([embedding_dim] + list(hidden_units), dropout_prob=0.5, use_bn=True) if use_dnn else None
    self.pred = Prediction()
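# --- Illustration only, not from this repo: a minimal sketch of the standard AFM
# interaction that AttentionPooling presumably implements (attention-weighted sum
# of pairwise element-wise products, per the AFM paper). The function name and
# the weight tensors w, b, h are hypothetical.
import torch
import torch.nn.functional as F

def afm_attention_pooling(embeds, w, b, h):
    # embeds: (batch, num_fields, dim); w: (dim, factor); b: (factor,); h: (factor,)
    batch, fields, dim = embeds.shape
    i, j = torch.triu_indices(fields, fields, offset=1)
    pairwise = embeds[:, i, :] * embeds[:, j, :]        # (batch, pairs, dim)
    scores = F.relu(pairwise @ w + b) @ h               # (batch, pairs) attention logits
    att = torch.softmax(scores, dim=1).unsqueeze(-1)    # (batch, pairs, 1)
    return (att * pairwise).sum(dim=1)                  # (batch, dim), matching the DNN input above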
def __init__(self, feature_metas, embedding_dim=8, hidden_units=(128, 64), linear_bias=False):
    super(WideDeep, self).__init__()
    self.feature_metas = feature_metas
    # Wide part: linear model over the raw features.
    self.linear_model = LinearModel(feature_metas, linear_bias=linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    # Deep part: MLP over the concatenated field embeddings.
    self.dnn = MLP([len(feature_metas) * embedding_dim] + list(hidden_units), activation='relu', dropout_prob=0.5, use_bn=True)
    self.pred = Prediction()
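# --- Illustration only (hypothetical shapes and stand-in tensors): how the wide
# and deep logits this __init__ sets up would combine; the actual forward lives
# elsewhere in the repo. The same concat-then-MLP deep wiring is shared by NFFM below.
import torch

batch, num_fields, embedding_dim = 4, 5, 8
embeds = torch.randn(batch, num_fields, embedding_dim)
deep_input = embeds.flatten(start_dim=1)   # (batch, num_fields * embedding_dim), the MLP input size above
wide_logit = torch.randn(batch, 1)         # stand-in for LinearModel(x)
deep_logit = torch.randn(batch, 1)         # stand-in for a final projection of the MLP output
prob = torch.sigmoid(wide_logit + deep_logit)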
def __init__(self, feature_metas, embedding_dim=8, hidden_units=(256, 64), linear_bias=False, dense_bias=False):
    super(xDeepFM, self).__init__()
    self.feature_metas = feature_metas
    self.linear_model = LinearModel(feature_metas, linear_bias=linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    # Compressed Interaction Network over the (num_fields, embedding_dim) embedding matrix.
    self.cin = CIN(input_dims=(len(feature_metas), embedding_dim))
    # NOTE: dense_bias is accepted but not passed to the MLP here.
    self.dnn = MLP(hidden_units=[len(feature_metas) * embedding_dim] + list(hidden_units), use_bn=True)
    self.pred = Prediction()
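# --- Illustration only: one layer of the Compressed Interaction Network that
# self.cin presumably stacks, following the xDeepFM paper. All sizes and tensor
# names here are hypothetical.
import torch

batch, m, D, H = 4, 5, 8, 6                    # fields, embedding dim, feature maps
x0 = torch.randn(batch, m, D)                  # base embedding matrix
xk = x0                                        # previous layer output (H_k = m at layer 0)
w = torch.randn(H, m * m)                      # compression weights for H feature maps
z = torch.einsum('bid,bjd->bijd', x0, xk)      # (batch, m, m, D) outer products per embedding slice
z = z.reshape(batch, m * m, D)
x_next = torch.einsum('hp,bpd->bhd', w, z)     # (batch, H, D) compressed feature maps
logit_part = x_next.sum(dim=2)                 # sum pooling over D, as in xDeepFM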
def __init__(self, feature_metas, embedding_dim=8, hidden_units=(256, 64), linear_bias=False, dense_bias=False):
    super(NFFM, self).__init__()
    self.feature_metas = feature_metas
    self.linear_model = LinearModel(feature_metas, linear_bias=linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    # self.fm = FM()  # explicit FM term currently disabled
    # Same concat-embeddings -> MLP wiring as the deep part of WideDeep above.
    self.dnn = MLP(hidden_units=[len(feature_metas) * embedding_dim] + list(hidden_units), bias=dense_bias)
    self.pred = Prediction()
def __init__(self, feature_metas, embedding_dim=8, hidden_units=(128, 64), bi_dropout_prob=0., linear_bias=False):
    super(NFM, self).__init__()
    self.feature_metas = feature_metas
    self.linear_model = LinearModel(feature_metas, linear_bias=linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    # Bi-Interaction pooling compresses all pairwise interactions into a single
    # embedding_dim vector, which is why the DNN input size is embedding_dim.
    self.bi_interaction = BiInteractionPooling(bi_dropout_prob)
    self.dnn = MLP([embedding_dim] + list(hidden_units), activation='relu', use_bn=True)
    self.pred = Prediction(task='binary')
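# --- Illustration only: the Bi-Interaction pooling identity NFM relies on,
#   sum_{i<j} e_i * e_j = 0.5 * ((sum_i e_i)^2 - sum_i e_i^2)   (elementwise),
# computed here on stand-in embeddings with hypothetical sizes.
import torch

embeds = torch.randn(4, 5, 8)                          # (batch, num_fields, embedding_dim)
square_of_sum = embeds.sum(dim=1).pow(2)               # (batch, embedding_dim)
sum_of_square = embeds.pow(2).sum(dim=1)               # (batch, embedding_dim)
bi_pooled = 0.5 * (square_of_sum - sum_of_square)      # (batch, embedding_dim)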
def __init__(self, feature_metas, embedding_dim=8, hidden_units=(128, 64), linear_bias=False):
    super(DCN, self).__init__()
    self.feature_metas = feature_metas
    self.linear_model = LinearModel(feature_metas, linear_bias=linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    # Cross network with 3 layers of explicit feature crossing.
    self.cross = CrossNet(len(feature_metas), embedding_dim, layer_num=3)
    self.dnn = MLP([len(feature_metas) * embedding_dim] + list(hidden_units), activation='relu', use_bn=True)
    self.pred = Prediction()
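# --- Illustration only: one cross layer of the DCN cross network,
#   x_{l+1} = x0 * (x_l . w) + b + x_l,
# which CrossNet presumably applies layer_num=3 times. Sizes and tensors are hypothetical.
import torch

batch, d = 4, 5 * 8                            # d = num_fields * embedding_dim
x0 = torch.randn(batch, d)                     # flattened embeddings
xl = x0
w, b = torch.randn(d), torch.randn(d)
x_next = x0 * (xl @ w).unsqueeze(1) + b + xl   # (batch, d)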
def __init__(self, feature_metas, embedding_dim=8, hidden_units=(128, 64), product_mode='outer', linear_bias=False):
    # product_mode: 'inner' or 'outer'
    super(PNN, self).__init__()
    self.feature_metas = feature_metas
    self.linear_model = LinearModel(feature_metas, linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    if product_mode == 'inner':
        self.product = InnerProduct(feature_metas, embedding_dim)
    else:
        self.product = OuterProduct(feature_metas, embedding_dim)
    # Number of field pairs (including self-pairs) fed to the DNN.
    m = (len(feature_metas) + 1) * len(feature_metas) // 2  # TODO: require_logits affects this shape
    self.dnn = MLP([m] + list(hidden_units), use_bn=True)
    self.pred = Prediction()
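# --- Illustration only: the inner-product interactions the 'inner' mode presumably
# feeds to the DNN. With self-pairs included there are F*(F+1)/2 of them, matching
# the m computed above. Sizes are hypothetical.
import torch

batch, fields, dim = 4, 5, 8
embeds = torch.randn(batch, fields, dim)
i, j = torch.triu_indices(fields, fields, offset=0)      # offset=0 keeps self-pairs
inner = (embeds[:, i, :] * embeds[:, j, :]).sum(-1)      # (batch, fields*(fields+1)//2)
assert inner.shape[1] == fields * (fields + 1) // 2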
def __init__(self, feature_metas, embedding_dim=8, linear_bias=False):
    super(FM, self).__init__()
    self.linear_model = LinearModel(feature_metas, linear_bias=linear_bias)
    self.embeddings = GroupEmbedding(feature_metas, embedding_dim)
    self.pred = Prediction()
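# --- Illustration only: the FM logit this model computes — a linear term plus the
# second-order term, using the same pooling identity shown for NFM above but summed
# over the embedding dimension. Tensors here are stand-ins with hypothetical sizes.
import torch

embeds = torch.randn(4, 5, 8)                  # (batch, num_fields, embedding_dim)
second_order = 0.5 * (embeds.sum(1).pow(2) - embeds.pow(2).sum(1)).sum(1, keepdim=True)
linear_logit = torch.randn(4, 1)               # stand-in for LinearModel(x)
prob = torch.sigmoid(linear_logit + second_order)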