Example #1
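These snippets omit their imports and the shared helper layers. A minimal set that makes them runnable is sketched below; the `layers` module path is an assumption (the helpers follow the pytorch-fm convention of FeaturesLinear, FeaturesEmbedding, FactorizationMachine, and MultiLayerPerceptron):

import torch
import torch.nn as nn

# Assumption: the four helper layers live in a local module; the name
# "layers" is a placeholder, not confirmed by the source.
from layers import (FeaturesLinear, FeaturesEmbedding,
                    FactorizationMachine, MultiLayerPerceptron)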
class NeuralFactorizationMachineModel(torch.nn.Module):
    """
    A pytorch implementation of Neural Factorization Machine.

    Feeds the second-order (pairwise interaction) part of FM through an MLP instead of summing it directly.
    Reference:
        X He and TS Chua, Neural Factorization Machines for Sparse Predictive Analytics, 2017.
    """

    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5)):
        super(NeuralFactorizationMachineModel, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear (first-order) part
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # interaction (embedding) part
        self.fm = FactorizationMachine(embed_dim, reduce_sum=False)  # bi-interaction, output [B,d]
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])  # note: dropouts[0] is unused here

    def forward(self, x):
        """
        :param x: [B,n_f]
        :return: [B,1]
        """
        x_fm = self.fm(self.embedding(x))  # [B,d]
        x = self.linear(x) + self.mlp(x_fm)
        return x

    def get_l2_loss(self, lambdas=1e-5):
        regularization_loss = 0
        for parameter in self.mlp.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        return lambdas * regularization_loss
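For context, here is a minimal sketch of the FactorizationMachine helper used above, assuming it implements the standard pairwise-interaction identity 0.5 * ((sum v)^2 - sum v^2) as in pytorch-fm. With reduce_sum=False it returns the [B,d] bi-interaction vector that NFM feeds into the MLP; with reduce_sum=True it returns the scalar second-order term used by DeepFM in Example #3. The internals are an assumption, not taken from the source:

class FactorizationMachine(torch.nn.Module):

    def __init__(self, embed_dim, reduce_sum=True):
        super().__init__()
        # embed_dim is kept only to match the call sites above;
        # this minimal sketch does not use it.
        self.reduce_sum = reduce_sum

    def forward(self, x):
        # x: [B, n_f, d] field embeddings
        square_of_sum = torch.sum(x, dim=1) ** 2     # [B, d]
        sum_of_square = torch.sum(x ** 2, dim=1)     # [B, d]
        ix = 0.5 * (square_of_sum - sum_of_square)   # pairwise interactions
        if self.reduce_sum:
            ix = torch.sum(ix, dim=1, keepdim=True)  # [B, 1]
        return ix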
Example #2
class WideAndDeepModel(torch.nn.Module):
    """
    A pytorch implementation of wide and deep learning.
    Wide & Deep is strong in industrial settings: it performs well because it has few parameters and runs fast.
    Reference:
        HT Cheng, et al. Wide & Deep Learning for Recommender Systems, 2016.
    """
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims,
                 dropout=0.5,
                 field_len=10):
        super(WideAndDeepModel, self).__init__()
        # wide part: the linear model (no FM term here, unlike DeepFM)
        self.linear = FeaturesLinear(field_dims)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

        self.embed_output_dim = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)

        # self.last_linear = nn.Linear(2,1)

    def forward(self, x):
        """
        :param x: [B,n_f]
        :return: [B,1]
        """
        embed_x = self.embedding(x)
        # wide + deep; compared with DeepFM this model has no FM term, otherwise identical
        # TODO: possible improvement: combine the two outputs through a Linear layer (see last_linear above)
        x = self.linear(x) + self.mlp(embed_x.view(x.size(0), -1))
        # x = self.last_linear(torch.cat([self.linear(x),self.mlp(embed_x.view(x.size(0),-1))],dim=-1))
        return x

    def get_l2_loss(self, lambdas=1e-5):
        regularization_loss = 0
        for parameter in self.embedding.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        for parameter in self.mlp.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        return lambdas * regularization_loss
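A short usage sketch for WideAndDeepModel; the field sizes and hyperparameters below are hypothetical, and field_len must equal the number of fields in x:

field_dims = [10, 20, 30]  # per-field vocabulary sizes (made up for illustration)
model = WideAndDeepModel(field_dims, embed_dim=16,
                         mlp_dims=(64, 32), field_len=len(field_dims))
x = torch.randint(0, 10, (4, 3))  # [B=4, n_f=3] indices, each < its field's size
logits = model(x)                 # [4, 1] raw scores (no sigmoid applied)
l2 = model.get_l2_loss()          # scalar penalty to add to the data loss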
Example #3
class DeepFactorizationMachineModel(nn.Module):
    """
    DeepFM,主要由三部分组成
    1、线性部分
    2、FM 部分
    3、Deep部分
    """
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_layers=(128, 64),
                 dropout=0.5,
                 field_len=10):
        super(DeepFactorizationMachineModel, self).__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(embed_dim=embed_dim, reduce_sum=True)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_size = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers,
                                        dropout)
        # self.last_fc = nn.Linear(3,1)

    def forward(self, x):
        """
        :param x:
        :return:
        """
        x_embed = self.embedding(x)  # [B,n_f,e]
        x_out = self.linear(x) + self.fm(x_embed) + self.mlp(
            x_embed.view(x.size(0), -1))
        # x_con = torch.cat([self.linear(x),self.fm(x_embed),self.mlp(x_embed.view(x.size(0),-1))],dim=1)
        # x_out = self.last_fc(x_con)
        return x_out

    def get_l2_loss(self, lambdas=1e-5):
        regularization_loss = 0
        for parameter in self.embedding.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        for parameter in self.mlp.parameters():
            regularization_loss += torch.sum(parameter.pow(2))
        return lambdas * regularization_loss
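A hedged sketch of one training step, showing how get_l2_loss is meant to be combined with the data loss; the Adam settings, the BCEWithLogitsLoss choice, and the toy data are assumptions, not from the source:

model = DeepFactorizationMachineModel(field_dims=[10, 20, 30],
                                      embed_dim=16, field_len=3)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = torch.nn.BCEWithLogitsLoss()  # the model returns raw logits

x = torch.randint(0, 10, (4, 3))          # [B, n_f] field indices (toy data)
y = torch.randint(0, 2, (4, 1)).float()   # binary labels, e.g. click / no click

optimizer.zero_grad()
loss = criterion(model(x), y) + model.get_l2_loss(lambdas=1e-5)
loss.backward()
optimizer.step()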