def __init__(self, opt):
    super(DeepFM, self).__init__(opt)
    # deep component runs on the flattened field embeddings
    self.embed_output_dim = len(self.field_dims) * self.latent_dim
    self.mlp_dims = opt['mlp_dims']
    self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                    self.mlp_dims,
                                    dropout=0.2)
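This `__init__` extends an `FM` base class, which is expected to provide `field_dims`, `latent_dim`, and the `linear`, `embedding`, and `fm` modules; the full class appears in Example #12 below. A minimal sketch of the expected configuration dictionary, with illustrative values (the exact keys the base class reads are an assumption):

opt = {
    'field_dims': [1000, 500, 100],  # vocabulary size per categorical field (illustrative)
    'latent_dim': 16,                # embedding width used by the FM base class (assumed key)
    'mlp_dims': [64, 32],            # hidden layer sizes of the deep component
}
model = DeepFM(opt)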
Example #2
File: afi.py Project: zhangy10/pytorch-fm
def __init__(self,
             field_dims,
             embed_dim,
             atten_embed_dim,
             num_heads,
             num_layers,
             mlp_dims,
             dropouts,
             has_residual=True):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    # projects field embeddings into the attention space
    self.atten_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.atten_output_dim = len(field_dims) * atten_embed_dim
    self.has_residual = has_residual
    # dropouts[1] regularizes the MLP; dropouts[0] the attention layers
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                    dropouts[1])
    self.self_attns = torch.nn.ModuleList([
        torch.nn.MultiheadAttention(atten_embed_dim,
                                    num_heads,
                                    dropout=dropouts[0])
        for _ in range(num_layers)
    ])
    self.attn_fc = torch.nn.Linear(self.atten_output_dim, 1)
    if self.has_residual:
        self.V_res_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
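For context, here is a sketch of how these modules are typically wired together in the forward pass, following the shapes fixed by the constructor above (illustrative rather than the project's exact code; assumes `import torch` and `import torch.nn.functional as F`):

def forward(self, x):
    # x: (batch, num_fields) of categorical indices
    embed_x = self.embedding(x)                # (batch, num_fields, embed_dim)
    atten_x = self.atten_embedding(embed_x)    # (batch, num_fields, atten_embed_dim)
    cross_term = atten_x.transpose(0, 1)       # MultiheadAttention expects (seq, batch, dim)
    for self_attn in self.self_attns:
        cross_term, _ = self_attn(cross_term, cross_term, cross_term)
    cross_term = cross_term.transpose(0, 1)
    if self.has_residual:
        cross_term = cross_term + self.V_res_embedding(embed_x)
    cross_term = F.relu(cross_term).contiguous().view(-1, self.atten_output_dim)
    score = (self.linear(x)
             + self.attn_fc(cross_term)
             + self.mlp(embed_x.view(-1, self.embed_output_dim)))
    return torch.sigmoid(score.squeeze(1))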
Example #3
def __init__(self, field_dims, embed_dim, num_layers, mlp_dims, dropout):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.cn = CrossNetwork(self.embed_output_dim, num_layers)
    # output_layer=False: the MLP's last hidden layer is concatenated with
    # the cross-network output before the final scoring layer below
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=False)
    self.linear = torch.nn.Linear(mlp_dims[-1] + self.embed_output_dim, 1)
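A sketch of the corresponding forward pass: the cross network and the MLP run in parallel on the flattened embeddings, and their outputs are concatenated before the final linear layer (illustrative, not necessarily the project's exact code):

def forward(self, x):
    embed_x = self.embedding(x).view(-1, self.embed_output_dim)
    cross_out = self.cn(embed_x)   # explicit feature crossing, same width as its input
    deep_out = self.mlp(embed_x)   # implicit interactions, width mlp_dims[-1]
    stacked = torch.cat([cross_out, deep_out], dim=1)
    return torch.sigmoid(self.linear(stacked).squeeze(1))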
Example #4
File: dfm.py Project: zhangy10/pytorch-fm
def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.fm = FactorizationMachine(reduce_sum=True)  # second-order FM term
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
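A hypothetical instantiation, assuming this is pytorch-fm's `DeepFactorizationMachineModel` (the class name is inferred from the file path; values are illustrative):

model = DeepFactorizationMachineModel(field_dims=[1000, 500, 100],
                                      embed_dim=16,
                                      mlp_dims=(64, 32),
                                      dropout=0.2)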
Example #5
def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
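With no linear or FM term, this model (resembling FNN) is just an embedding layer feeding an MLP, so the matching forward pass is a one-liner (a sketch; the final squeeze/sigmoid details may differ in the source):

def forward(self, x):
    embed_x = self.embedding(x)  # (batch, num_fields, embed_dim)
    return torch.sigmoid(self.mlp(embed_x.view(-1, self.embed_output_dim)).squeeze(1))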
Example #6
File: nfm.py Project: zhangy10/pytorch-fm
def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims)
    # bi-interaction pooling: reduce_sum=False keeps a (batch, embed_dim) vector
    self.fm = torch.nn.Sequential(FactorizationMachine(reduce_sum=False),
                                  torch.nn.BatchNorm1d(embed_dim),
                                  torch.nn.Dropout(dropouts[0]))
    self.mlp = MultiLayerPerceptron(embed_dim, mlp_dims, dropouts[1])
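Note that the MLP input width here is `embed_dim`, not `num_fields * embed_dim`: the bi-interaction pooling collapses the field dimension first. A sketch of the forward pass (illustrative):

def forward(self, x):
    cross_term = self.fm(self.embedding(x))  # (batch, embed_dim) after bi-interaction pooling
    score = self.linear(x) + self.mlp(cross_term)
    return torch.sigmoid(score.squeeze(1))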
Example #7
def __init__(self, field_dims, user_field_idx, item_field_idx, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.user_field_idx = user_field_idx
    self.item_field_idx = item_field_idx
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=False)
    # final layer scores the concatenation of the MLP output and the
    # embed_dim-sized user-item interaction term
    self.fc = torch.nn.Linear(mlp_dims[-1] + embed_dim, 1)
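A sketch of the forward pass: the user and item embeddings form a GMF-style elementwise product that is concatenated with the MLP output before the final scoring layer (illustrative; assumes `user_field_idx` and `item_field_idx` are length-1 index arrays, so the slice keeps a singleton field dimension to squeeze):

def forward(self, x):
    embed_x = self.embedding(x)                          # (batch, num_fields, embed_dim)
    user_x = embed_x[:, self.user_field_idx].squeeze(1)  # (batch, embed_dim)
    item_x = embed_x[:, self.item_field_idx].squeeze(1)
    gmf = user_x * item_x                                # generalized matrix factorization term
    deep = self.mlp(embed_x.view(-1, self.embed_output_dim))
    score = self.fc(torch.cat([gmf, deep], dim=1))
    return torch.sigmoid(score.squeeze(1))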
Example #8
File: afn.py Project: zhangy10/pytorch-fm
def __init__(self, field_dims, embed_dim, LNN_dim, mlp_dims, dropouts):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)  # first-order linear term
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.LNN_dim = LNN_dim
    self.LNN_output_dim = self.LNN_dim * embed_dim
    # logarithmic neural network learns adaptive-order feature interactions
    self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
    self.mlp = MultiLayerPerceptron(self.LNN_output_dim, mlp_dims,
                                    dropouts[0])
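A sketch of a forward pass consistent with these shapes: the LNN produces cross features from the embeddings and feeds the MLP, alongside the linear term (illustrative; the LNN output flattening to `LNN_output_dim` is an assumption):

def forward(self, x):
    embed_x = self.embedding(x)  # (batch, num_fields, embed_dim)
    lnn_out = self.LNN(embed_x)  # assumed to flatten to (batch, LNN_dim * embed_dim)
    score = self.linear(x) + self.mlp(lnn_out.view(-1, self.LNN_output_dim))
    return torch.sigmoid(score.squeeze(1))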
Example #9
def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
    super().__init__()
    self.linear = LogisticRegressionModel(field_dims)
    self.ffm = torch.nn.Sequential(
        FieldAwareFactorizationMachine(field_dims, embed_dim),
        torch.nn.BatchNorm1d(embed_dim), torch.nn.Dropout(dropouts[0]))
    # one embed_dim-sized vector per unordered field pair
    self.ffm_output_dim = len(field_dims) * (len(field_dims) - 1) // 2 * embed_dim
    self.mlp = MultiLayerPerceptron(self.ffm_output_dim, mlp_dims,
                                    dropouts[1])
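The `ffm_output_dim` formula allocates one `embed_dim`-sized vector per unordered field pair. A quick check with illustrative numbers:

n_fields, embed_dim = 4, 8
ffm_output_dim = n_fields * (n_fields - 1) // 2 * embed_dim
print(ffm_output_dim)  # 6 pairs * 8 dims = 48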
Example #10
def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)
    self.ffm_output_dim = len(field_dims) * (len(field_dims) - 1) // 2 * embed_dim
    # unlike Example #9, batch norm and dropout act on the flattened FFM output
    self.bn = torch.nn.BatchNorm1d(self.ffm_output_dim)
    self.dropout = torch.nn.Dropout(dropouts[0])
    self.mlp = MultiLayerPerceptron(self.ffm_output_dim, mlp_dims,
                                    dropouts[1])
Example #11
def __init__(self, field_dims, embed_dim, mlp_dims, dropout, method='inner'):
    super().__init__()
    num_fields = len(field_dims)
    if method == 'inner':
        self.pn = InnerProductNetwork()
    elif method == 'outer':
        self.pn = OuterProductNetwork(num_fields, embed_dim)
    else:
        raise ValueError('unknown product type: ' + method)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims, embed_dim)
    self.embed_output_dim = num_fields * embed_dim
    # MLP consumes one scalar per field pair plus the flattened embeddings
    self.mlp = MultiLayerPerceptron(num_fields * (num_fields - 1) // 2 + self.embed_output_dim, mlp_dims, dropout)
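A sketch of the matching forward pass: the product network emits one scalar per field pair (in the inner-product variant), concatenated with the flattened embeddings before the MLP. `self.linear` is constructed above but does not appear in this sketch; whether the source uses it is unclear (illustrative code):

def forward(self, x):
    embed_x = self.embedding(x)    # (batch, num_fields, embed_dim)
    cross_term = self.pn(embed_x)  # (batch, num_fields * (num_fields - 1) // 2)
    h = torch.cat([embed_x.view(-1, self.embed_output_dim), cross_term], dim=1)
    return torch.sigmoid(self.mlp(h).squeeze(1))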
Example #12
class DeepFM(FM):
    def __init__(self, opt):
        super(DeepFM, self).__init__(opt)
        self.embed_output_dim = len(self.field_dims) * self.latent_dim
        self.mlp_dims = opt['mlp_dims']
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, self.mlp_dims, dropout=0.2)

    def forward(self, x):
        linear_score = self.linear(x)   # first-order term
        xv = self.embedding(x)          # (batch, num_fields, latent_dim)
        fm_score = self.fm(xv)          # second-order FM term
        dnn_score = self.mlp(xv.view(-1, self.embed_output_dim))
        score = linear_score + fm_score + dnn_score
        return score.squeeze(1)
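A minimal usage sketch, reusing the assumed `opt` keys from the fragment at the top of this section (illustrative values):

import torch

opt = {'field_dims': [1000, 500, 100], 'latent_dim': 16, 'mlp_dims': [64, 32]}
model = DeepFM(opt)

x = torch.randint(0, 100, (32, 3))  # batch of 32 samples, one index per field
scores = model(x)                   # raw scores of shape (32,); apply torch.sigmoid for probabilities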
Example #13
def __init__(self,
             field_dims,
             embed_dim,
             mlp_dims,
             dropout,
             cross_layer_sizes,
             split_half=True):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    # compressed interaction network models explicit vector-wise crosses
    self.cin = CompressedInteractionNetwork(len(field_dims),
                                            cross_layer_sizes, split_half)
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                    dropout)
    self.linear = FeaturesLinear(field_dims)
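A sketch of the forward pass: the CIN and the MLP each consume the embeddings, and their scores are added to the linear term (illustrative rather than the project's exact code):

def forward(self, x):
    embed_x = self.embedding(x)  # (batch, num_fields, embed_dim)
    score = (self.linear(x)
             + self.cin(embed_x)
             + self.mlp(embed_x.view(-1, self.embed_output_dim)))
    return torch.sigmoid(score.squeeze(1))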
Example #14
def __init__(self, field_dims, embed_dim, num_heads, num_layers, mlp_dims,
             dropouts):
    super().__init__()
    self.embed_dim = embed_dim
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    # self.res = torch.nn.Linear(self.embed_output_dim, self.embed_output_dim)
    # the extra 399 inputs appear to be fork-specific side features
    # concatenated with the flattened embeddings before the MLP
    self.mlp = MultiLayerPerceptron(self.embed_output_dim + 399, mlp_dims,
                                    dropouts[1])
    self.self_attns = torch.nn.ModuleList([
        torch.nn.MultiheadAttention(embed_dim,
                                    num_heads,
                                    dropout=dropouts[0])
        for _ in range(num_layers)
    ])
    self.attn_fc = torch.nn.Linear(self.embed_output_dim, 1)
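The `+ 399` widening of the MLP input suggests a 399-dimensional vector of side features joined to the flattened embeddings before the deep component. A hypothetical forward pass consistent with these shapes; the `extra` argument and its wiring are assumptions, not the project's code:

def forward(self, x, extra):
    # x: (batch, num_fields) indices; extra: (batch, 399) assumed side features
    embed_x = self.embedding(x)
    cross_term = embed_x.transpose(0, 1)  # (num_fields, batch, embed_dim) for attention
    for self_attn in self.self_attns:
        cross_term, _ = self_attn(cross_term, cross_term, cross_term)
    cross_term = cross_term.transpose(0, 1).contiguous().view(-1, self.embed_output_dim)
    deep_in = torch.cat([embed_x.view(-1, self.embed_output_dim), extra], dim=1)
    score = self.linear(x) + self.attn_fc(cross_term) + self.mlp(deep_in)
    return torch.sigmoid(score.squeeze(1))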