Example #1
 def __init__(self, field_dims, obsItem_dims, obsUser_dims, embed_dim, obs,
              embed):
     super().__init__()
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     # coefficient tables for observed features: item field (field_dims[0:1]) and user field (field_dims[1:2])
     self.obsItem_coeff = FeaturesEmbedding(field_dims[0:1], obsItem_dims)
     self.obsUser_coeff = FeaturesEmbedding(field_dims[1:2], obsUser_dims)
     self.linear = FeaturesLinear(field_dims, bias=False)
     self.fm = FactorizationMachine(reduce_sum=True)
     self.obs = obs
     self.embed = embed
     assert obs or embed, "One of obs or embed must be true\n"
Example #2
 def __init__(self, field_dims, embed_dim, num_layers, mlp_dims, dropout):
     super().__init__()
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.cn = CrossNetwork(self.embed_output_dim, num_layers)
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=False)
     self.linear = torch.nn.Linear(mlp_dims[-1] + self.embed_output_dim, 1)
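The constructor above is a Deep & Cross Network head: the flattened field embeddings feed a cross network and an MLP in parallel, and their outputs are concatenated before the final torch.nn.Linear (hence mlp_dims[-1] + self.embed_output_dim input features). As a minimal, self-contained sketch of the standard DCN cross interaction, x_{l+1} = x_0 * (w·x_l) + b + x_l (not necessarily the exact CrossNetwork used here; TinyCrossLayer and all sizes below are made up):

    import torch

    class TinyCrossLayer(torch.nn.Module):
        # one DCN-style cross layer: x_next = x0 * (x_l . w) + b + x_l
        def __init__(self, input_dim):
            super().__init__()
            self.w = torch.nn.Linear(input_dim, 1, bias=False)
            self.b = torch.nn.Parameter(torch.zeros(input_dim))

        def forward(self, x0, xl):
            return x0 * self.w(xl) + self.b + xl

    x0 = torch.randn(4, 16)       # batch of 4, embed_output_dim = 16
    layer = TinyCrossLayer(16)
    x1 = layer(x0, x0)            # the first cross layer re-uses x0 as x_l
    print(x1.shape)               # torch.Size([4, 16])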
Example #3
 def __init__(self,
              field_dims,
              embed_dim,
              atten_embed_dim,
              num_heads,
              num_layers,
              mlp_dims,
              dropouts,
              has_residual=True):
     super().__init__()
     self.num_fields = len(field_dims)
     self.linear = FeaturesLinear(field_dims)
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.atten_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.atten_output_dim = len(field_dims) * atten_embed_dim
     self.has_residual = has_residual
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                     dropouts[1])
     self.self_attns = torch.nn.ModuleList([
         torch.nn.MultiheadAttention(atten_embed_dim,
                                     num_heads,
                                     dropout=dropouts[0])
         for _ in range(num_layers)
     ])
     self.attn_fc = torch.nn.Linear(self.atten_output_dim, 1)
     if self.has_residual:
         self.V_res_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
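Example #3 runs stacked torch.nn.MultiheadAttention layers over the per-field embeddings (AutoInt-style). By default nn.MultiheadAttention expects the (seq_len, batch, embed) layout, so the field axis has to play the role of the sequence; a quick shape check with made-up sizes:

    import torch

    batch, num_fields, atten_embed_dim, num_heads = 8, 10, 32, 4
    x = torch.randn(num_fields, batch, atten_embed_dim)   # fields act as the "sequence"
    attn = torch.nn.MultiheadAttention(atten_embed_dim, num_heads, dropout=0.2)
    out, _ = attn(x, x, x)                                # self-attention over fields
    print(out.shape)                                      # torch.Size([10, 8, 32])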
Example #4
    def __init__(self,
                 field_dims,
                 embed_dim,
                 num_layers,
                 mlp_dims,
                 dropout,
                 training_method='dfa'):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.cn = DFACrossNetwork(self.embed_output_dim,
                                  num_layers,
                                  dfa_output=False)
        self.mlp = DFAMultiLayerPerceptron(self.embed_output_dim,
                                           mlp_dims,
                                           dropout,
                                           output_layer=False,
                                           dfa_output=False)
        self.linear = torch.nn.Linear(mlp_dims[-1] + self.embed_output_dim, 1)

        self.dfa_embeddings = DFALayer()
        self.dfa_stack = DFALayer()
        self.dfa = DFA(dfa_layers=[
            *self.cn.dfa_layers, *self.mlp.dfa_layers, self.dfa_stack,
            self.dfa_embeddings
        ],
                       feedback_points_handling=FeedbackPointsHandling.LAST,
                       no_training=training_method != 'dfa')
Example #5
 def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
     super().__init__()
     self.linear = FeaturesLinear(field_dims)
     self.fm = FactorizationMachine(reduce_sum=True)
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
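This is the DeepFM layout: a first-order FeaturesLinear term, a second-order FactorizationMachine term over the field embeddings, and an MLP over the flattened embeddings. The reduce_sum=True FM term is the usual 0.5 * ((sum of embeddings)^2 - sum of squared embeddings), summed to a single logit per row; a standalone sketch (fm_pairwise and the sizes are illustrative, not the library's code):

    import torch

    def fm_pairwise(emb):                       # emb: (batch, num_fields, embed_dim)
        square_of_sum = emb.sum(dim=1) ** 2     # (batch, embed_dim)
        sum_of_square = (emb ** 2).sum(dim=1)   # (batch, embed_dim)
        return 0.5 * (square_of_sum - sum_of_square).sum(dim=1, keepdim=True)

    emb = torch.randn(4, 10, 16)                # hypothetical batch of field embeddings
    print(fm_pairwise(emb).shape)               # torch.Size([4, 1])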
Example #6
    def __init__(self, field_dims, embed_dim):

        super().__init__()

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
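A note on field_dims, which every example passes to FeaturesEmbedding / FeaturesLinear: in the torchfm-style layers these snippets appear to use, each field owns a contiguous block of rows in one shared embedding table, and the raw per-field indices are shifted by cumulative offsets before the lookup. A minimal reimplementation of that idea (my reading of the pattern, with made-up sizes):

    import numpy as np
    import torch

    field_dims = [3, 5, 2]                                  # hypothetical vocabulary size per field
    offsets = np.array((0, *np.cumsum(field_dims)[:-1]))    # [0, 3, 8]
    table = torch.nn.Embedding(sum(field_dims), 16)

    x = torch.tensor([[2, 4, 1], [0, 0, 0]])                # raw per-field indices, (batch, num_fields)
    emb = table(x + torch.as_tensor(offsets))               # (batch, num_fields, embed_dim)
    print(emb.shape)                                        # torch.Size([2, 3, 16])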
Example #7
 def __init__(self, field_dims, user_field_idx, item_field_idx, embed_dim, mlp_dims, dropout):
     super().__init__()
     self.user_field_idx = user_field_idx
     self.item_field_idx = item_field_idx
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=False)
     self.fc = torch.nn.Linear(mlp_dims[-1] + embed_dim, 1)
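The final fc here takes mlp_dims[-1] + embed_dim features, which matches a NeuMF-style design: an element-wise (GMF) product of the user and item embeddings concatenated with the MLP branch output. A dimension check under that assumption (all sizes and tensors below are stand-ins):

    import torch

    batch, embed_dim, mlp_out = 4, 16, 32
    user_emb = torch.randn(batch, embed_dim)               # embedding of the user field
    item_emb = torch.randn(batch, embed_dim)               # embedding of the item field
    gmf = user_emb * item_emb                              # element-wise (GMF) part, (batch, embed_dim)
    mlp_branch = torch.randn(batch, mlp_out)               # stand-in for the MLP branch output
    fc = torch.nn.Linear(mlp_out + embed_dim, 1)
    print(fc(torch.cat([gmf, mlp_branch], dim=1)).shape)   # torch.Size([4, 1])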
Example #8
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):

        super().__init__()

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                        dropout)
Example #9
 def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
     super().__init__()
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.linear = FeaturesLinear(field_dims)
     self.fm = torch.nn.Sequential(FactorizationMachine(reduce_sum=False),
                                   torch.nn.BatchNorm1d(embed_dim),
                                   torch.nn.Dropout(dropouts[0]))
     self.mlp = MultiLayerPerceptron(embed_dim, mlp_dims, dropouts[1])
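With reduce_sum=False the FM block keeps the per-dimension bi-interaction vector instead of collapsing it to a scalar, which is why BatchNorm1d(embed_dim) follows it and why the MLP takes embed_dim inputs. A quick sketch of that pooled vector (sizes are made up):

    import torch

    emb = torch.randn(4, 10, 16)                                 # (batch, num_fields, embed_dim)
    bi = 0.5 * (emb.sum(dim=1) ** 2 - (emb ** 2).sum(dim=1))     # (batch, embed_dim), no final sum
    bn = torch.nn.BatchNorm1d(16)
    print(bn(bi).shape)                                          # torch.Size([4, 16])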
Example #10
    def __init__(self, field_dims, embed_dim, attn_size, dropouts):

        super().__init__()

        self.num_fields = len(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

        self.linear = FeaturesLinear(field_dims)
        self.afm = AttentionalFactorizationMachine(embed_dim, attn_size, dropouts)
Example #11
 def __init__(self, field_dims, embed_dim, LNN_dim, mlp_dims, dropouts):
     super().__init__()
     self.num_fields = len(field_dims)
     self.linear = FeaturesLinear(field_dims)  # Linear
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # Embedding
     self.LNN_dim = LNN_dim
     self.LNN_output_dim = self.LNN_dim * embed_dim
     self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
     self.mlp = MultiLayerPerceptron(self.LNN_output_dim, mlp_dims,
                                     dropouts[0])
Example #12
 def __init__(self, field_dims, embed_dim, mlp_dims, dropout, method='inner'):
     super().__init__()
     num_fields = len(field_dims)
     if method == 'inner':
         self.pn = InnerProductNetwork()
     elif method == 'outer':
         self.pn = OuterProductNetwork(num_fields, embed_dim)
     else:
         raise ValueError('unknown product type: ' + method)
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.linear = FeaturesLinear(field_dims, embed_dim)
     self.embed_output_dim = num_fields * embed_dim
     self.mlp = MultiLayerPerceptron(num_fields * (num_fields - 1) // 2 + self.embed_output_dim, mlp_dims, dropout)
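The MLP input size here is num_fields * (num_fields - 1) // 2 + self.embed_output_dim: one inner product per unordered field pair plus the flattened embeddings. A self-contained sketch of the pairwise part (illustrative sizes):

    import torch

    emb = torch.randn(4, 10, 16)                       # (batch, num_fields, embed_dim)
    row, col = torch.triu_indices(10, 10, offset=1)    # all 45 unordered field pairs
    pairs = (emb[:, row] * emb[:, col]).sum(dim=2)     # (batch, 45) inner products
    flat = emb.flatten(start_dim=1)                    # (batch, 160)
    print(torch.cat([pairs, flat], dim=1).shape)       # torch.Size([4, 205])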
Example #13
 def __init__(self,
              field_dims,
              embed_dim,
              mlp_dims,
              dropout,
              cross_layer_sizes,
              split_half=True):
     super().__init__()
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.cin = CompressedInteractionNetwork(len(field_dims),
                                             cross_layer_sizes, split_half)
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                     dropout)
     self.linear = FeaturesLinear(field_dims)
Example #14
 def __init__(self, field_dims, order, embed_dim):
     super().__init__()
     if order < 1:
         raise ValueError(f'invalid order: {order}')
     self.order = order
     self.embed_dim = embed_dim
     self.linear = FeaturesLinear(field_dims)
     if order >= 2:
         self.embedding = FeaturesEmbedding(field_dims,
                                            embed_dim * (order - 1))
         self.fm = FactorizationMachine(reduce_sum=True)
     if order >= 3:
         self.kernels = torch.nn.ModuleList([
             AnovaKernel(order=i, reduce_sum=True)
             for i in range(3, order + 1)
         ])
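For order >= 2 the embedding is allocated with embed_dim * (order - 1) columns, which reads as one embed_dim-wide slice per interaction order: the first slice for the plain FM term, the remaining slices for the ANOVA kernels of orders 3..order. A quick sketch of that split (my reading of the constructor, with made-up sizes):

    import torch

    order, embed_dim = 4, 16
    emb = torch.randn(8, 10, embed_dim * (order - 1))    # (batch, num_fields, 48)
    chunks = torch.split(emb, embed_dim, dim=2)          # (order - 1) slices of width embed_dim
    print([c.shape[-1] for c in chunks])                 # [16, 16, 16]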
Example #15
 def __init__(self,
              field_dims,
              embed_dim,
              mlp_dims,
              dropout,
              training_method='dfa'):
     super().__init__()
     self.linear = FeaturesLinear(field_dims)
     self.fm = FactorizationMachine(reduce_sum=True)
     self.embedding = FeaturesEmbedding(
         field_dims, embed_dim)  # Trained through FM. OK: no weights in FM.
     self.embed_output_dim = len(field_dims) * embed_dim
     self.mlp = DFAMultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout,
                                        training_method=training_method)
Example #16
 def __init__(self, field_dims, embed_dim, num_heads, num_layers, mlp_dims,
              dropouts):
     super().__init__()
     self.embed_dim = embed_dim
     self.num_fields = len(field_dims)
     self.linear = FeaturesLinear(field_dims)
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.mlp = MultiLayerPerceptron(self.embed_output_dim + 399, mlp_dims,
                                     dropouts[1])
     self.self_attns = torch.nn.ModuleList([
         torch.nn.MultiheadAttention(embed_dim,
                                     num_heads,
                                     dropout=dropouts[0])
         for _ in range(num_layers)
     ])
     self.attn_fc = torch.nn.Linear(self.embed_output_dim, 1)
Example #17
    def __init__(self,
                 field_dims,
                 embed_dim,
                 atten_embed_dim,
                 num_heads,
                 num_layers,
                 mlp_dims,
                 dropouts,
                 has_residual=True,
                 training_method='dfa'):
        super().__init__()
        self.num_fields = len(field_dims)
        self.linear = FeaturesLinear(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.dfa_embedding = DFALayer()
        self.atten_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
        self.dfa_atten_embedding = DFALayer()
        self.embed_output_dim = len(field_dims) * embed_dim
        self.atten_output_dim = len(field_dims) * atten_embed_dim
        self.has_residual = has_residual
        self.mlp = DFAMultiLayerPerceptron(self.embed_output_dim,
                                           mlp_dims,
                                           dropouts[1],
                                           dfa_output=False)
        self.self_attns = torch.nn.ModuleList([
            torch.nn.MultiheadAttention(atten_embed_dim,
                                        num_heads,
                                        dropout=dropouts[0])
            for _ in range(num_layers)
        ])
        self.dfa_self_attns = [DFALayer() for _ in range(num_layers - 1)]

        self.attn_fc = torch.nn.Linear(self.atten_output_dim, 1)

        if self.has_residual:
            self.V_res_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)

        self.dfa_cross = DFALayer()
        self.dfa = DFA(dfa_layers=[
            self.dfa_atten_embedding, *self.dfa_self_attns, self.dfa_cross,
            *self.mlp.dfa_layers, self.dfa_embedding
        ],
                       feedback_points_handling=FeedbackPointsHandling.LAST,
                       no_training=training_method != 'dfa')
Example #18
    def __init__(self,
                 field_dims,
                 embed_dim,
                 LNN_dim,
                 mlp_dims,
                 dropouts,
                 training_method='dfa'):
        super().__init__()
        self.num_fields = len(field_dims)
        self.linear = FeaturesLinear(field_dims)  # Linear
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # Embedding
        self.dfa_embed = DFALayer()
        self.LNN_dim = LNN_dim
        self.LNN_output_dim = self.LNN_dim * embed_dim
        self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
        self.dfa_lnn = DFALayer()
        self.mlp = DFAMultiLayerPerceptron(self.LNN_output_dim,
                                           mlp_dims,
                                           dropouts[0],
                                           dfa_output=False)

        self.dfa = DFA(dfa_layers=[self.dfa_lnn, *self.mlp.dfa_layers],
                       feedback_points_handling=FeedbackPointsHandling.LAST,
                       no_training=training_method != 'dfa')