Example #1
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5)):
        super(NeuralFactorizationMachineModel, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # interaction part
        self.fm = FactorizationMachine(embed_dim, reduce_sum=False)
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])
Example #2
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_layers=(128, 64),
                 dropout=0.5,
                 field_len=10):
        super(DeepFactorizationMachineModel, self).__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(embed_dim=embed_dim, reduce_sum=True)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_size = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers,
                                        dropout)
Example #3
class NeuralFactorizationMachineModel(torch.nn.Module):
    """
    A pytorch implementation of Neural Factorization Machine.

    Feeds the second-order (bi-interaction) part of FM into an MLP.

    Reference:
        X He and TS Chua, Neural Factorization Machines for Sparse Predictive Analytics, 2017.
    """

    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5)):
        super(NeuralFactorizationMachineModel, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # interaction part
        self.fm = FactorizationMachine(embed_dim, reduce_sum=False)
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])

    def forward(self, x):
        """
        :param x: [B,n_f]
        :return: [B,1]
        """
        x_fm = self.fm(self.embedding(x))  # [B,d]
        x = self.linear(x) + self.mlp(x_fm)
        return x

    def get_l2_loss(self, lambdas=1e-5):
        regularization_loss = 0
        for parameter in self.mlp.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        return lambdas * regularization_loss
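
A quick hypothetical smoke test for the class above, assuming this repository's helper modules (FeaturesLinear, FeaturesEmbedding, FactorizationMachine, MultiLayerPerceptron) are importable; the field_dims values are made up:

import torch

field_dims = [10, 20, 30]  # hypothetical vocabulary size per categorical field
model = NeuralFactorizationMachineModel(field_dims, embed_dim=16)

# Each column of x holds one category index per field.
x = torch.stack([torch.randint(0, d, (4,)) for d in field_dims], dim=1)  # [B, n_f]

logits = model(x)  # [B, 1] raw scores
loss = torch.nn.functional.binary_cross_entropy_with_logits(
    logits.squeeze(1), torch.rand(4)) + model.get_l2_loss()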
Example #4
 def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
     super().__init__()
     self.linear = FeaturesLinear(field_dims)
     self.fm = FactorizationMachine(reduce_sum=True)
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
Example #5
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims,
                 dropout=0.5,
                 field_len=10):
        super(WideAndDeepModel, self).__init__()
        # wide part: linear + FM
        self.linear = FeaturesLinear(field_dims)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

        self.embed_output_dim = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)
Example #6
 def __init__(self, field_dims, embed_dims, mlp_dims, dropout):
     super().__init__()
     self.linear = FeaturesLinear(field_dims)
     self.embedding = FeaturesEmbedding(field_dims, embed_dims)
     self.embed_output_dim = len(field_dims) * embed_dims
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                     dropout)
Example #7
 def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
     super().__init__()
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.linear = FeaturesLinear(field_dims)
     self.fm = torch.nn.Sequential(FactorizationMachine(reduce_sum=False),
                                   torch.nn.BatchNorm1d(embed_dim),
                                   torch.nn.Dropout(dropouts[0]))
     self.mlp = MultiLayerPerceptron(embed_dim, mlp_dims, dropouts[1])
Example #8
 def __init__(self, field_dims, user_field_idx, item_field_idx, embed_dim, mlp_dims, dropout):
     super().__init__()
     self.user_field_idx = user_field_idx
     self.item_field_idx = item_field_idx
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, output_layer=False)
     self.fc = torch.nn.Linear(mlp_dims[-1] + embed_dim, 1)
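
This __init__ matches pytorch-fm's NeuralCollaborativeFiltering; the snippet stops before forward(). A hedged sketch of the corresponding forward pass, which mixes a GMF term (the element-wise product of the user and item embeddings) into the final linear layer alongside the MLP output:

 def forward(self, x):
     # Sketch only; shapes assume x is [B, n_f] with one index per field.
     x = self.embedding(x)                            # [B, n_f, d]
     user_x = x[:, self.user_field_idx].squeeze(1)    # [B, d]
     item_x = x[:, self.item_field_idx].squeeze(1)    # [B, d]
     gmf = user_x * item_x                            # GMF (element-wise) term
     x = self.mlp(x.view(-1, self.embed_output_dim))  # [B, mlp_dims[-1]]
     x = self.fc(torch.cat([gmf, x], dim=1))          # [B, 1]
     return torch.sigmoid(x.squeeze(1))               # [B] click probability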
Example #9
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5)):
        super(NON2, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part
        # self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # embedding
        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim)  # embedding

        self.dnn = MultiLayerPerceptron(embed_dim * len(field_dims), mlp_layers, dropout=dropouts[0], output_layer=False)

        self.atten_embedding = torch.nn.Linear(embed_dim, 32)
        self.atten_output_dim = len(field_dims) * 32

        self.self_attns = torch.nn.ModuleList([
                torch.nn.MultiheadAttention(32, 4, dropout=dropouts[0]) for _ in range(3)
            ])

        self.input_dim = 400 + self.atten_output_dim + 1  # dnn output (mlp_layers[-1]) + attention output + linear term
        self.mlp = MultiLayerPerceptron(self.input_dim, embed_dims=(64, 32), dropout=dropouts[1])
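
The snippet above also omits forward(). A hypothetical sketch of how these pieces could be wired, noting that torch.nn.MultiheadAttention defaults to sequence-first (n_f, B, E) input, so the field axis must be transposed in and out:

    def forward(self, x):
        # Hypothetical sketch; the original forward is not shown.
        embed_x = self.embedding(x)              # [B, n_f, d]
        atten_x = self.atten_embedding(embed_x)  # [B, n_f, 32]
        cross = atten_x.transpose(0, 1)          # [n_f, B, 32], sequence-first
        for attn in self.self_attns:
            cross, _ = attn(cross, cross, cross)  # self-attention over fields
        cross = cross.transpose(0, 1).reshape(x.size(0), -1)   # [B, n_f * 32]
        deep = self.dnn(embed_x.view(x.size(0), -1))           # [B, 400]
        out = torch.cat([deep, cross, self.linear(x)], dim=1)  # [B, input_dim]
        return self.mlp(out)                                   # [B, 1]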
Example #10
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5), type="glu"):
        super(GFRLNFM, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part

        self.embedding_gate = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim, type=type)

        self.fm = FactorizationMachine(reduce_sum=False)

        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])
Example #11
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_layers=(256, 128, 64),
                 dropout=0.5):
        super(FactorizationSupportedNeuralNetworkModel, self).__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

        self.mlp_input_dim = len(field_dims) * embed_dim

        self.mlp = MultiLayerPerceptron(self.mlp_input_dim, mlp_layers, dropout=dropout)
Example #12
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout=0.5):
        super(GELWDL, self).__init__()
        # wide part: linear + FM
        self.linear = FeaturesLinear(field_dims)

        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim)

        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)
Example #13
 def __init__(self,
              field_dims,
              embed_dim,
              mlp_dims,
              dropout,
              cross_layer_sizes,
              split_half=True):
     super().__init__()
     self.embedding = FeaturesEmbedding(field_dims, embed_dim)
     self.embed_output_dim = len(field_dims) * embed_dim
     self.cin = CompressedInteractionNetwork(len(field_dims),
                                             cross_layer_sizes, split_half)
     self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                     dropout)
     self.linear = FeaturesLinear(field_dims)
Example #14
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), glunum=2, dropouts=(0.1, 0.1)):
        super().__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part
        # self.embedding_gate = FeaturesEmbeddingWithGate(field_dims,embed_dim,glu_num=glunum)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # interaction part
        # second layer: gating mechanism to extract information
        self.gene_inter = GenerateConv()

        glu_list = [GLUActivation1D(embed_dim, int(embed_dim * 2)) for _ in range(glunum)]
        self.glus = torch.nn.Sequential(*glu_list)
        # self.mlp_input = len(field_dims) * (len(field_dims)-1)/2
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])
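
GLUActivation1D is repository-specific; the general gated-linear-unit idea behind the comment above is to project the input to twice the target width, split it, and gate one half with the sigmoid of the other. A minimal illustrative sketch (GLU1D is a made-up name, not the repository's module):

import torch

class GLU1D(torch.nn.Module):
    """Gated linear unit: project to 2*hidden, then a * sigmoid(b)."""
    def __init__(self, in_dim, hidden_dim):
        super().__init__()
        self.proj = torch.nn.Linear(in_dim, 2 * hidden_dim)

    def forward(self, x):
        # torch.nn.functional.glu performs the same split-and-gate in one call
        a, b = self.proj(x).chunk(2, dim=-1)
        return a * torch.sigmoid(b)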
Example #15
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        """
        :param field_dims: list of vocabulary sizes, one per input field
        :param embed_dim: size of each dense embedding
        :param mlp_dims: sizes of the MLP hidden layers
        :param dropout: dropout rate
        """
        super().__init__()

        # Wide Learning Component
        self.linear = FeaturesLinear(field_dims)

        # Deep Learning Component
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                        dropout)
Example #16
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_layers=(128, 64),
                 dropout=0.5,
                 type="glu"):
        super(GELDFM, self).__init__()
        self.linear = FeaturesLinear(field_dims)

        self.fm = FactorizationMachine(reduce_sum=True)
        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims,
                                                       embed_dim,
                                                       type=type)
        self.embed_output_size = len(field_dims) * embed_dim

        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers,
                                        dropout)
Example #17
class WideAndDeepModel(torch.nn.Module):
    """
    A pytorch implementation of wide and deep learning.
    WD is strong for industrial use; it works well because it has few parameters and runs fast.
    Reference:
        HT Cheng, et al. Wide & Deep Learning for Recommender Systems, 2016.
    """
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims,
                 dropout=0.5,
                 field_len=10):
        super(WideAndDeepModel, self).__init__()
        # wide part: linear + FM
        self.linear = FeaturesLinear(field_dims)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

        self.embed_output_dim = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)

        # self.last_linear = nn.Linear(2,1)

    def forward(self, x):
        """
        :param x: [B,n_f]
        :return: [B,1]
        """
        embed_x = self.embedding(x)
        # wide + deep; compared with DeepFM this lacks the FM part, otherwise identical
        # TODO: improve by merging the two outputs through a Linear layer
        x = self.linear(x) + self.mlp(embed_x.view(x.size(0), -1))
        # x = self.last_linear(torch.cat([self.linear(x),self.mlp(embed_x.view(x.size(0),-1))],dim=-1))
        return x

    def get_l2_loss(self, lambdas=1e-5):
        regularization_loss = 0
        for parameter in self.embedding.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        for parameter in self.mlp.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        return lambdas * regularization_loss
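
As with the NFM example above, a hypothetical smoke test; field_len must equal the number of fields so the flattened embedding matches embed_output_dim:

import torch

field_dims = [10, 20, 30]  # hypothetical vocabulary sizes
model = WideAndDeepModel(field_dims, embed_dim=16, mlp_dims=(128, 64),
                         field_len=len(field_dims))

x = torch.stack([torch.randint(0, d, (4,)) for d in field_dims], dim=1)  # [B, n_f]
logits = model(x)  # [4, 1]
loss = torch.nn.functional.binary_cross_entropy_with_logits(
    logits.squeeze(1), torch.rand(4)) + model.get_l2_loss()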
Example #18
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims=(400, 400, 400),
                 glunum=1,
                 dropout=0.1):
        super(WDLGate, self).__init__()
        # wide part: linear + FM
        self.linear = FeaturesLinear(field_dims)

        self.embedding_gate = FeaturesEmbeddingWithGate(field_dims,
                                                        embed_dim,
                                                        glu_num=glunum)

        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)
Example #19
class DeepFactorizationMachineModel(nn.Module):
    """
    DeepFM,主要由三部分组成
    1、线性部分
    2、FM 部分
    3、Deep部分
    """
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_layers=(128, 64),
                 dropout=0.5,
                 field_len=10):
        super(DeepFactorizationMachineModel, self).__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(embed_dim=embed_dim, reduce_sum=True)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_size = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers,
                                        dropout)
        # self.last_fc = nn.Linear(3,1)

    def forward(self, x):
        """
        :param x:
        :return:
        """
        x_embed = self.embedding(x)  # [B,n_f,e]
        x_out = self.linear(x) + self.fm(x_embed) + self.mlp(
            x_embed.view(x.size(0), -1))
        # x_con = torch.cat([self.linear(x),self.fm(x_embed),self.mlp(x_embed.view(x.size(0),-1))],dim=1)
        # x_out = self.last_fc(x_con)
        return x_out

    def get_l2_loss(self, lambdas=1e-5):
        regularization_loss = 0
        for parameter in self.embedding.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        for parameter in self.mlp.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        return lambdas * regularization_loss
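
Most examples here delegate the pairwise interactions to FactorizationMachine. A minimal sketch of what such a module computes, using the standard FM identity sum_{i<j} <v_i, v_j> = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2) applied per embedding dimension (the embed_dim argument seen in this repository's version is not needed for the math and is omitted):

import torch

class FactorizationMachineSketch(torch.nn.Module):
    """Pairwise FM term over the field axis of [B, n_f, d] embeddings."""
    def __init__(self, reduce_sum=True):
        super().__init__()
        self.reduce_sum = reduce_sum

    def forward(self, x):
        square_of_sum = x.sum(dim=1) ** 2    # [B, d]
        sum_of_square = (x ** 2).sum(dim=1)  # [B, d]
        ix = 0.5 * (square_of_sum - sum_of_square)
        if self.reduce_sum:
            return ix.sum(dim=1, keepdim=True)  # [B, 1], DeepFM-style scalar term
        return ix                               # [B, d], NFM bi-interaction vector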
Example #20
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims=(400, 400, 400),
                 dropout=0.5,
                 cross_layer_sizes=(200, 200, 200),
                 split_half=True):
        super().__init__()

        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim

        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                        dropout)

        # modified cross network: a CIN module
        self.cin = CompressedInteractionNetwork(len(field_dims),
                                                cross_layer_sizes, split_half)

        self.linear = FeaturesLinear(field_dims)
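
This last snippet (an xDeepFM-style model) again shows only __init__; in that architecture the prediction sums the linear term, the CIN output, and the MLP over the flattened embeddings. A hedged sketch of the matching forward:

    def forward(self, x):
        # Sketch of an xDeepFM-style forward for the module above.
        embed_x = self.embedding(x)  # [B, n_f, d]
        return (self.linear(x) + self.cin(embed_x)
                + self.mlp(embed_x.view(-1, self.embed_output_dim)))  # [B, 1]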