Example #1
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
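For reference, a matching forward pass for this DeepFM-style constructor might look like the sketch below. It assumes the layers behave as in the pytorch-fm package (torchfm.layer): self.fm returns the summed second-order term and the MLP consumes the flattened embeddings. This is an illustrative sketch, not necessarily the original author's code.

    def forward(self, x):
        # x: LongTensor of shape (batch_size, num_fields), one feature index per field
        embed_x = self.embedding(x)  # (batch_size, num_fields, embed_dim)
        out = self.linear(x) + self.fm(embed_x) \
              + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(out.squeeze(1))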
Example #2
    def __init__(self, field_dims, embed_dims, mlp_dims, dropout):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dims)
        self.embed_output_dim = len(field_dims) * embed_dims
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                        dropout)
Example #3
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5)):
        super(NeuralFactorizationMachineModel, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # interaction part
        self.fm = FactorizationMachine(embed_dim, reduce_sum=False)
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])
Example #4
    def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        self.fm = torch.nn.Sequential(FactorizationMachine(reduce_sum=False),
                                      torch.nn.BatchNorm1d(embed_dim),
                                      torch.nn.Dropout(dropouts[0]))
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_dims, dropouts[1])
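A plausible forward pass for this NFM-style constructor, sketched under the assumption that self.fm (FM with reduce_sum=False, then BatchNorm1d and Dropout) yields a (batch_size, embed_dim) bi-interaction vector as in pytorch-fm:

    def forward(self, x):
        # bi-interaction pooling over the field embeddings, already batch-normalized and dropped out
        cross_term = self.fm(self.embedding(x))      # (batch_size, embed_dim)
        out = self.linear(x) + self.mlp(cross_term)  # first-order term + deep term
        return torch.sigmoid(out.squeeze(1))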
Example #5
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5), type="glu"):
        super(GFRLNFM, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part

        self.embedding_gate = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim, type=type)

        self.fm = FactorizationMachine(reduce_sum=False)

        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])
Example #6
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout=0.5):
        super(GELWDL, self).__init__()
        # wide part: linear + FM
        self.linear = FeaturesLinear(field_dims)

        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim)

        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)
Example #7
    def __init__(self, field_dims, embed_dim, type_c="hard"):
        """
        :param field_dims: list, the dimension of each field
        :param embed_dim: embedding dimension
        """
        super(MyIFM, self).__init__()

        self.fc = FeaturesLinear(field_dims)
        # feature embeddings
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.myFM = MyIFMLayer(field_dims, embed_dim, type_c=type_c)
        self.fm = FactorizationMachine(embed_dim, reduce_sum=True)
Example #8
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_layers=(128, 64),
                 dropout=0.5,
                 field_len=10):
        super(DeepFactorizationMachineModel, self).__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(embed_dim=embed_dim, reduce_sum=True)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_size = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers,
                                        dropout)
Example #9
    def __init__(self, field_dims, embed_dim, embed_dims, field_len=10):
        """
        :param field_dims: list, the dimension of each field
        :param embed_dim: embedding dimension
        """
        super(IFM, self).__init__()

        self.fc = FeaturesLinear(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.ifm = IFMLayer(field_dims,
                            embed_dim,
                            embed_dims=embed_dims,
                            field_len=field_len)

        self.fm = FactorizationMachine(embed_dim, reduce_sum=True)
Example #10
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims=(400, 400, 400),
                 dropout=0.5,
                 cross_layer_sizes=(100, 100, 100),
                 split_half=True):
        super().__init__()
        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        # self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
        self.cin = CompressedInteractionNetwork(len(field_dims),
                                                cross_layer_sizes, split_half)

        self.linear = FeaturesLinear(field_dims)
Example #11
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), glunum=2, dropouts=(0.1, 0.1)):
        super().__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part
        # self.embedding_gate = FeaturesEmbeddingWithGate(field_dims,embed_dim,glu_num=glunum)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # interaction part
        # second layer: gating mechanism to extract information
        self.gene_inter = GenerateConv()

        glu_list = list()
        for _ in range(glunum):
            glu_list.append(GLUActivation1D(embed_dim, int(embed_dim*2)))
        self.glus = torch.nn.Sequential(*glu_list)
        # self.mlp_input = len(field_dims) * (len(field_dims)-1)/2
        self.mlp = MultiLayerPerceptron(embed_dim, mlp_layers, dropouts[1])
Example #12
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims,
                 dropout=0.5,
                 field_len=10):
        super(WideAndDeepModel, self).__init__()
        # wide part: linear + FM
        self.linear = FeaturesLinear(field_dims)

        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

        self.embed_output_dim = field_len * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)
Example #13
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_layers=(128, 64),
                 dropout=0.5,
                 type="glu"):
        super(GELDFM, self).__init__()
        self.linear = FeaturesLinear(field_dims)

        self.fm = FactorizationMachine(reduce_sum=True)
        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims,
                                                       embed_dim,
                                                       type=type)
        self.embed_output_size = len(field_dims) * embed_dim

        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers,
                                        dropout)
Example #14
    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        """
        :param field_dims: list with the dimension of each input field
        :param embed_dim: size of the dense embedding vectors
        :param mlp_dims: tuple with the size of each MLP hidden layer
        :param dropout: dropout rate
        """
        super().__init__()

        # Wide Learning Component
        self.linear = FeaturesLinear(field_dims)

        # Deep Learning Component
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
                                        dropout)
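To make the wide/deep split concrete, a minimal forward-pass sketch for this constructor (assuming pytorch-fm-style layer semantics): the wide part is the linear term over the raw feature indices, the deep part is the MLP over the flattened embeddings.

    def forward(self, x):
        embed_x = self.embedding(x)  # (batch_size, num_fields, embed_dim)
        out = self.linear(x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(out.squeeze(1))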
Example #15
    def __init__(self,
                 field_dims,
                 embed_dim,
                 mlp_dims=(400, 400, 400),
                 glunum=1,
                 dropout=0.1):
        super(WDLGate, self).__init__()
        # wide part: linear + FM
        self.linear = FeaturesLinear(field_dims)

        self.embedding_gate = FeaturesEmbeddingWithGate(field_dims,
                                                        embed_dim,
                                                        glu_num=glunum)

        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim,
                                        mlp_dims,
                                        dropout=dropout)
Example #16
    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropouts=(0.5, 0.5)):
        super(NON2, self).__init__()

        self.linear = FeaturesLinear(field_dims)  # linear part
        # self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # embedding
        self.embedding = FeaturesEmbeddingWithGlobalIn(field_dims, embed_dim)  # embedding

        self.dnn = MultiLayerPerceptron(embed_dim * len(field_dims), mlp_layers,
                                        dropout=dropouts[0], output_layer=False)

        self.atten_embedding = torch.nn.Linear(embed_dim, 32)
        self.atten_output_dim = len(field_dims) * 32

        self.self_attns = torch.nn.ModuleList([
                torch.nn.MultiheadAttention(32, 4, dropout=dropouts[0]) for _ in range(3)
            ])

        self.input_dim = 400 + self.atten_output_dim + 1
        self.mlp = MultiLayerPerceptron(self.input_dim, embed_dims=(64, 32), dropout=dropouts[1])
Example #17
    def __init__(self, field_dims):
        super(LogisticRegression, self).__init__()
        self.linear = FeaturesLinear(field_dims)
Example #18
    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
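For completeness, the corresponding forward pass would look roughly like this sketch, assuming FactorizationMachine(reduce_sum=True) returns a (batch_size, 1) second-order interaction term as in pytorch-fm:

    def forward(self, x):
        # first-order linear term plus summed second-order FM interactions over the embeddings
        out = self.linear(x) + self.fm(self.embedding(x))
        return torch.sigmoid(out.squeeze(1))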
Example #19
    def __init__(self, field_dims, embed_dim):
        super(FwFM, self).__init__()
        self.lr = FeaturesLinear(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.fwfm = FwFMInterLayer(field_dims)
Example #20
    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)
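A forward-pass sketch for this field-aware FM constructor, assuming (as in pytorch-fm) that self.ffm returns the stacked pairwise interaction vectors of shape (batch_size, num_field_pairs, embed_dim):

    def forward(self, x):
        # sum the field-aware pairwise interactions down to a (batch_size, 1) term
        ffm_term = torch.sum(torch.sum(self.ffm(x), dim=1), dim=1, keepdim=True)
        out = self.linear(x) + ffm_term
        return torch.sigmoid(out.squeeze(1))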