Example #1
    def __init__(self,
                 user_sparse_and_nums,
                 user_dense,
                 item_sparse_and_nums,
                 item_dense,
                 embed_dim,
                 deep_layers,
                 attention_layers=[512, 128, 1],
                 sigmoid_out=False):
        super(DIN, self).__init__()
        self.user_feat_num = len(user_sparse_and_nums) * embed_dim + len(
            user_dense)
        self.item_feat_num = len(item_sparse_and_nums) * embed_dim + len(
            item_dense)

        #embed
        self.user_embed = SparseEmbeddingLayer(
            feat_and_nums=user_sparse_and_nums, embed_dim=embed_dim)
        self.user_dense = DenseFeatCatLayer()
        self.item_embed = SparseEmbeddingLayer(
            feat_and_nums=item_sparse_and_nums, embed_dim=embed_dim)
        self.item_dense = DenseFeatCatLayer()

        #Attention
        self.attention = Attention(self.item_feat_num, layers=attention_layers)

        #Deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        deep_input_dim = self.user_feat_num + self.item_feat_num * 2  #*2: target item features plus attention-pooled history features
        self.deep = DNN(input_dim=deep_input_dim, layers=deep_layers)

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
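
Every example in this listing leans on the same helper modules, which the snippets never define. A minimal sketch of what SparseEmbeddingLayer and DenseFeatCatLayer plausibly look like, assuming feat_and_nums is a list of (feature_name, vocab_size) pairs and inputs arrive as a dict of tensors; the repo's real modules may differ:

import torch
import torch.nn as nn


class SparseEmbeddingLayer(nn.Module):
    """One nn.Embedding per (feature_name, vocab_size) pair (assumed interface)."""

    def __init__(self, feat_and_nums, embed_dim):
        super().__init__()
        self.embeds = nn.ModuleDict(
            {feat: nn.Embedding(num, embed_dim) for feat, num in feat_and_nums})

    def forward(self, x):
        # x: dict mapping feature name -> LongTensor of ids, shape (batch,)
        return torch.cat([emb(x[f]) for f, emb in self.embeds.items()], dim=-1)


class DenseFeatCatLayer(nn.Module):
    """Concatenates raw dense features into a (batch, num_dense) tensor."""

    def forward(self, x, dense_feat):
        # x: dict mapping feature name -> FloatTensor, shape (batch,)
        return torch.cat([x[f].float().unsqueeze(-1) for f in dense_feat], dim=-1)
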
Example #2
    def __init__(self,
                 sparse_feat_and_nums,
                 cross_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 sigmoid_out=False):
        super(WideDeep, self).__init__()
        sparse_cross_feat_and_nums = sparse_feat_and_nums + cross_feat_and_nums

        #wide
        self.wide = Linear(sparse_feat_and_nums=sparse_cross_feat_and_nums,
                           dense_feat=dense_feat)

        #deep
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        deep_dim = len(sparse_cross_feat_and_nums) * embed_dim + len(
            dense_feat)
        self.deep = DNN(input_dim=deep_dim,
                        layers=deep_layers,
                        act='relu',
                        drop=0.2,
                        bn=False)

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
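
Several models here (WideDeep, NFM, DeepFM, XDeepFM, FiBiNet, AFM) reuse the same Linear module for the first-order part. A hedged sketch: one scalar weight per sparse id (an embedding of width 1) plus an affine map over the dense features.

class Linear(nn.Module):

    def __init__(self, sparse_feat_and_nums, dense_feat, bias=True):
        super().__init__()
        self.weights = nn.ModuleDict(
            {feat: nn.Embedding(num, 1) for feat, num in sparse_feat_and_nums})
        self.dense_feat = dense_feat
        self.dense = nn.Linear(len(dense_feat), 1, bias=bias)

    def forward(self, x):
        # first-order sparse term: sum of per-id scalar weights -> (batch, 1)
        sparse = sum(emb(x[f]) for f, emb in self.weights.items())
        dense = self.dense(
            torch.cat([x[f].float().unsqueeze(-1) for f in self.dense_feat],
                      dim=-1))
        return sparse + dense
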
Example #3
    def __init__(self,
                 sparse_feat_and_nums,
                 cross_layers,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 sigmoid_out=False):
        super(DCN, self).__init__()
        x0_dim = len(sparse_feat_and_nums) * embed_dim + len(dense_feat)

        #embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #cross
        self.crossNet = CrossNet(x0_dim, cross_layers)

        #deep
        self.deep = DNN(input_dim=x0_dim,
                        layers=deep_layers,
                        act='relu',
                        drop=0.2,
                        bn=False)

        #output
        self.linear = nn.Linear(deep_layers[-1] + x0_dim, 1)
        self.drop = nn.Dropout()
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
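
CrossNet is the component that gives DCN its name. A minimal sketch of the standard DCN recurrence x_{l+1} = x_0 * (x_l . w_l) + b_l + x_l, assuming cross_layers is the number of cross layers (the repo's version may differ in init or regularization):

class CrossNet(nn.Module):

    def __init__(self, x0_dim, num_layers):
        super().__init__()
        self.ws = nn.ParameterList(
            [nn.Parameter(torch.randn(x0_dim) * 0.01) for _ in range(num_layers)])
        self.bs = nn.ParameterList(
            [nn.Parameter(torch.zeros(x0_dim)) for _ in range(num_layers)])

    def forward(self, x0):
        x = x0
        for w, b in zip(self.ws, self.bs):
            # (x @ w) is one scalar per sample; it scales x0, then bias + skip
            x = x0 * (x @ w).unsqueeze(-1) + b + x
        return x
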
Example #4
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 res_layers,
                 sigmoid_out=False):
        super(DeepCrossing, self).__init__()

        #shared embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #Residual_Units
        self.stack_dim = len(sparse_feat_and_nums) * embed_dim + len(
            dense_feat)
        self.resnet = nn.Sequential(
            *[ResNet(self.stack_dim, layer) for layer in res_layers])

        #output
        self.linear = nn.Linear(self.stack_dim, 1)
        self.drop = nn.Dropout()
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
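
The ResNet blocks stacked here are Deep Crossing's residual units, not image ResNets. A plausible sketch, matching the ResNet(stack_dim, hidden_dim) call above:

class ResNet(nn.Module):

    def __init__(self, stack_dim, hidden_dim):
        super().__init__()
        self.fc1 = nn.Linear(stack_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, stack_dim)

    def forward(self, x):
        # residual unit: two affine layers, input added back before the ReLU
        return torch.relu(self.fc2(torch.relu(self.fc1(x))) + x)
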
Example #5
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 sigmoid_out=False):
        super(NFM, self).__init__()

        #linear
        self.linear = Linear(sparse_feat_and_nums=sparse_feat_and_nums,
                             dense_feat=dense_feat)

        #embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #Bi-Interaction pooling
        self.interaction = BiInteractionPooling()
        self.bi_drop = nn.Dropout()

        #deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.deep_input_dim = embed_dim + len(dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh')

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
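
Bi-Interaction pooling compresses all pairwise element-wise products into a single embed_dim vector via the standard FM identity, which is why deep_input_dim above is embed_dim rather than num_feat * embed_dim. A minimal sketch:

class BiInteractionPooling(nn.Module):

    def forward(self, e):
        # e: (batch, num_feat, embed_dim)
        # 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) == sum_{i<j} e_i * e_j
        square_of_sum = e.sum(dim=1).pow(2)
        sum_of_square = e.pow(2).sum(dim=1)
        return 0.5 * (square_of_sum - sum_of_square)  # (batch, embed_dim)
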
Example #6
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 sigmoid_out=False):
        super(DeepFM, self).__init__()

        #shared embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #fm interaction
        self.linear = Linear(sparse_feat_and_nums=sparse_feat_and_nums,
                             dense_feat=dense_feat)
        self.fm_interaction = FMInteraction()

        #deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.deep_input_dim = len(sparse_feat_and_nums) * embed_dim + len(
            dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh')

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
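
FMInteraction rests on the same pairwise identity as NFM's pooling above, but reduces it to a single second-order logit. A hedged sketch:

class FMInteraction(nn.Module):

    def forward(self, e):
        # e: (batch, num_feat, embed_dim) -> (batch, 1)
        square_of_sum = e.sum(dim=1).pow(2)
        sum_of_square = e.pow(2).sum(dim=1)
        return 0.5 * (square_of_sum - sum_of_square).sum(dim=-1, keepdim=True)
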
Example #7
    def __init__(self,
                 user_sparse_and_nums,
                 user_dense,
                 item_sparse_and_nums,
                 item_dense,
                 embed_dim,
                 deep_layers,
                 gru1_hidden_dim=512,
                 gru2_hidden_dim=256,
                 sigmoid_out=False):
        super(DIEN, self).__init__()
        self.user_feat_num = len(user_sparse_and_nums) * embed_dim + len(
            user_dense)
        self.item_feat_num = len(item_sparse_and_nums) * embed_dim + len(
            item_dense)

        #embed
        self.user_embed = SparseEmbeddingLayer(
            feat_and_nums=user_sparse_and_nums, embed_dim=embed_dim)
        self.user_dense = DenseFeatCatLayer()
        self.item_embed = SparseEmbeddingLayer(
            feat_and_nums=item_sparse_and_nums, embed_dim=embed_dim)
        self.item_dense = DenseFeatCatLayer()

        #InterestExtractorLayer
        self.extractor = InterestExtractorLayer(input_dim=self.item_feat_num,
                                                hidden_dim=gru1_hidden_dim)
        #InterestEvolvingLayer
        self.evolving = InterestEvolvingLayer(item_feat_dim=self.item_feat_num,
                                              input_dim=gru1_hidden_dim,
                                              hidden_dim=gru2_hidden_dim,
                                              gru_type='AUGRU')

        #Deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        deep_input_dim = self.user_feat_num + self.item_feat_num + gru2_hidden_dim
        self.deep = DNN(input_dim=deep_input_dim, layers=deep_layers)

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
Example #8
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 inner_product=True,
                 outer_product=True,
                 sigmoid_out=False):
        super(PNN, self).__init__()
        self.inner_product = inner_product
        self.outer_product = outer_product
        self.D1 = deep_layers[0]

        #embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #Wz
        self.Wz = nn.Conv1d(in_channels=len(sparse_feat_and_nums),
                            out_channels=self.D1,
                            kernel_size=embed_dim)

        #Wp
        if self.inner_product:  #IPNN
            self.Inner = InnerProduct()
            self.Wp_inner = nn.Conv1d(in_channels=len(sparse_feat_and_nums),
                                      out_channels=self.D1,
                                      kernel_size=len(sparse_feat_and_nums))
        if self.outer_product:  #OPNN
            self.Outer = OuterProduct()
            self.Wp_outer = nn.Conv1d(in_channels=embed_dim,
                                      out_channels=self.D1,
                                      kernel_size=embed_dim)

        #b1
        self.B1 = nn.Parameter(nn.init.xavier_normal_(torch.empty(self.D1, 1)))

        #Deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.deep_input_dim = self.D1 + len(dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh')

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
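
The InnerProduct interface is ambiguous across these examples; since Wp_inner here is a Conv1d whose in_channels and kernel_size both equal the field count, this snippet most plausibly expects the full Gram matrix of field dot products. A sketch under that assumption:

class InnerProduct(nn.Module):

    def forward(self, e):
        # e: (batch, num_feat, embed_dim) -> (batch, num_feat, num_feat)
        return torch.bmm(e, e.transpose(1, 2))
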
Example #9
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 con_channels=(64, 32, 16),
                 con_kernel_sizes=(7, 5, 3),
                 sigmoid_out=False):
        super(CCPM, self).__init__()

        #embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #CNN layer
        conv_layers = []
        self.cur_feat_nums = len(sparse_feat_and_nums)
        n = len(sparse_feat_and_nums)
        l = len(con_channels)

        for i in range(1, l + 1):
            k = max(1, int((1 - pow(i / l, l - i)) * n)) if i < l else 3

            conv_layers.append(
                CNNLayer(
                    in_channels=con_channels[i - 2]
                    if i != 1 else 1,  #first channel is 1
                    out_channels=con_channels[i - 1],
                    feat_nums=self.cur_feat_nums,
                    con_kernel=con_kernel_sizes[i - 1],
                    topk=k))
            self.cur_feat_nums = min(k, self.cur_feat_nums)
        self.convs = nn.Sequential(*conv_layers)

        #deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.deep_input_dim = self.cur_feat_nums * con_channels[
            -1] * embed_dim + len(dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh',
                        bn=True)

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
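
The topk argument of CNNLayer points at CCPM's flexible p-max pooling: keep the k strongest responses along the feature axis while preserving their original order. In isolation, that pooling step might look like this (the repo's CNNLayer presumably wraps a convolution plus this):

def kmax_pooling(x, k, dim=2):
    # keep the k largest activations along `dim`, in their original order
    idx = x.topk(k, dim=dim).indices.sort(dim=dim).values
    return x.gather(dim, idx)
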
Example #10
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 num_log_neurons=64,
                 sigmoid_out=False):
        super(AFN, self).__init__()

        #afn
        self.afn_embed = Embedding(sparse_feat_and_nums, dense_feat, embed_dim)
        self.afn_ltl = LogarithmicTransformerLayer(
            len(sparse_feat_and_nums) + len(dense_feat), num_log_neurons)
        self.drop_out1 = nn.Dropout()
        self.afn_deep_input_dim = num_log_neurons * embed_dim
        self.drop_out2 = nn.Dropout()
        self.afn_bn1 = nn.BatchNorm1d(num_log_neurons * embed_dim)
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.afn_deep = DNN(self.afn_deep_input_dim,
                            layers=deep_layers,
                            act='tanh')

        #deep
        self.deep_sparse_embed = SparseEmbeddingLayer(sparse_feat_and_nums,
                                                      embed_dim)
        self.deep_dense = DenseFeatCatLayer()
        self.deep_input_dim = len(sparse_feat_and_nums) * embed_dim + len(
            dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh')

        #ensemble
        self.afn_w = nn.Parameter(torch.tensor([0.5]))
        self.deep_w = nn.Parameter(torch.tensor([0.5]))
        self.bias = nn.Parameter(torch.zeros(1))

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
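
The LogarithmicTransformerLayer is AFN's core trick: a weighted sum in log space realizes a product of features raised to learned powers. A hedged sketch, clamping to keep the log well-defined:

class LogarithmicTransformerLayer(nn.Module):

    def __init__(self, num_feat, num_log_neurons):
        super().__init__()
        self.w = nn.Parameter(torch.randn(num_log_neurons, num_feat) * 0.1)

    def forward(self, e):
        # e: (batch, num_feat, embed_dim) -> (batch, num_log_neurons, embed_dim)
        log_e = torch.log(e.abs().clamp_min(1e-7))
        return torch.exp(self.w @ log_e)
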
Example #11
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 hidden_size,
                 num_attention_heads,
                 num_att_layers,
                 deep_layers,
                 all_layer_output=False,
                 sigmoid_out=False):
        super(AutoInt, self).__init__()
        #define
        self.all_layer_output = all_layer_output
        self.num_feat = len(sparse_feat_and_nums) + len(dense_feat)
        self.att_output_dim = (num_att_layers * embed_dim * self.num_feat
                               if self.all_layer_output else
                               embed_dim * self.num_feat)

        #embed
        self.sparse_embed = SparseEmbeddingLayer(
            feat_and_nums=sparse_feat_and_nums, embed_dim=embed_dim)
        self.dense_embed = DenseEmbeddingLayer(dense_feat=dense_feat,
                                               embed_dim=embed_dim)

        #transformer
        self.layers = nn.ModuleList([
            TransformerBase(input_size=embed_dim,
                            hidden_size=hidden_size,
                            num_attention_heads=num_attention_heads,
                            resnet=True) for _ in range(num_att_layers)
        ])

        #deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.deep = DNN(input_dim=self.att_output_dim,
                        layers=deep_layers,
                        act='tanh')

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
Example #12
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 attention_factor,
                 sigmoid_out=False):
        super(AFM, self).__init__()

        #embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        # linear
        self.linear = Linear(sparse_feat_and_nums=sparse_feat_and_nums,
                             dense_feat=dense_feat)

        #fm attention
        self.fmAttention = FMAttention(embed_dim, attention_factor)

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
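
FMAttention is AFM's attention network over the pairwise element-wise products. A minimal sketch, assuming attention_factor is the hidden width of the attention MLP:

class FMAttention(nn.Module):

    def __init__(self, embed_dim, attention_factor):
        super().__init__()
        self.att = nn.Sequential(nn.Linear(embed_dim, attention_factor),
                                 nn.ReLU(), nn.Linear(attention_factor, 1))
        self.p = nn.Linear(embed_dim, 1, bias=False)

    def forward(self, e):
        # e: (batch, num_feat, embed_dim)
        i, j = torch.triu_indices(e.size(1), e.size(1), offset=1)
        prods = e[:, i] * e[:, j]                  # (batch, num_pairs, embed_dim)
        w = torch.softmax(self.att(prods), dim=1)  # attention over pairs
        return self.p((w * prods).sum(dim=1))      # (batch, 1)
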
Example #13
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 cin_layers=[128, 128],
                 sigmoid_out=False):
        super(XDeepFM, self).__init__()

        #shared embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #linear
        self.linear = Linear(sparse_feat_and_nums, dense_feat, bias=False)

        #cin
        self.cin = CIN(h_layers=cin_layers,
                       sparse_feat_num=len(sparse_feat_and_nums))
        cin_cores = sum([num - num // 2 for num in cin_layers])
        self.cin_w = nn.Linear(cin_cores, 1)

        #deep
        self.deep_input_dim = len(sparse_feat_and_nums) * embed_dim + len(
            dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh')
        self.deep_w = nn.Linear(deep_layers[-1], 1)

        #bias
        self.b = nn.Parameter(torch.zeros(1))

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
Example #14
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 senet_factor,
                 inter_type='all',
                 sigmoid_out=False):
        super(FiBiNet, self).__init__()

        #shared embed
        self.embed = SparseEmbeddingLayer(feat_and_nums=sparse_feat_and_nums,
                                          embed_dim=embed_dim)
        self.dense = DenseFeatCatLayer()

        #linear
        self.linear = Linear(sparse_feat_and_nums, dense_feat, bias=False)

        #SeNet
        self.senet = SeNet(len(sparse_feat_and_nums), senet_factor)

        #BilinearInteraction
        self.biLinear = BilinearInteraction(num_feat=len(sparse_feat_and_nums),
                                            embed_dim=embed_dim,
                                            inter_type=inter_type)

        #deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.deep_input_dim = len(sparse_feat_and_nums) * (
            len(sparse_feat_and_nums) - 1) * embed_dim + len(dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh')

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
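
SeNet here is FiBiNet's squeeze-and-excitation over fields: pool each field embedding to a scalar, pass it through a two-layer bottleneck, and reweight the fields. A sketch assuming senet_factor is the reduction ratio:

class SeNet(nn.Module):

    def __init__(self, num_feat, reduction):
        super().__init__()
        mid = max(1, num_feat // reduction)
        self.fc = nn.Sequential(nn.Linear(num_feat, mid), nn.ReLU(),
                                nn.Linear(mid, num_feat), nn.ReLU())

    def forward(self, e):
        # e: (batch, num_feat, embed_dim)
        z = e.mean(dim=-1)          # squeeze: one scalar per field
        a = self.fc(z)              # excitation weights
        return e * a.unsqueeze(-1)  # reweighted field embeddings
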
Example #15
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 hidden_size,
                 num_attention_heads,
                 context_dim,
                 k_layers,
                 deep_layers,
                 sigmoid_out=False):
        super(InterHAt, self).__init__()
        #embed
        self.sparse_embed = SparseEmbeddingLayer(
            feat_and_nums=sparse_feat_and_nums, embed_dim=embed_dim)
        self.dense_embed = DenseEmbeddingLayer(dense_feat=dense_feat,
                                               embed_dim=embed_dim)

        #transformer
        self.transformer = TransformerBase(
            input_size=embed_dim,
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads)

        #Hierarchical Attention
        self.hier_att = HierarchicalAttention(embed_dim, context_dim, k_layers)

        #deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
        self.deep_input_dim = embed_dim
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh')

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
Example #16
    def __init__(self,
                 sparse_feat_and_nums,
                 dense_feat,
                 embed_dim,
                 deep_layers,
                 rec_feat_maps=(64, 32, 16),
                 con_channels=(64, 32, 16),
                 con_kernel_sizes=(7, 5, 3),
                 pool_kernel_sizes=(2, 2, 2),
                 is_share_embed=False,
                 sigmoid_out=False):
        super(FGCNN, self).__init__()

        #define
        assert len(con_kernel_sizes) == len(pool_kernel_sizes) and  \
               len(pool_kernel_sizes) == len(con_channels) and  \
               len(con_channels) == len(rec_feat_maps), \
               'con_kernel_sizes, pool_kernel_sizes, con_channels and rec_feat_maps must all have the same length'

        #Feature Generation embed
        self.fg_embed = SparseEmbeddingLayer(
            feat_and_nums=sparse_feat_and_nums, embed_dim=embed_dim)
        self.fg_dense = DenseFeatCatLayer()

        #Deep Classifier embed
        if is_share_embed:
            self.dc_embed = self.fg_embed
            # self.dc_dense = self.fg_dense
        else:
            self.dc_embed = SparseEmbeddingLayer(
                feat_and_nums=sparse_feat_and_nums, embed_dim=embed_dim)
            # self.dc_dense = DenseFeatCatLayer()

        #Feature Generation
        self.inner_feat_nums = len(sparse_feat_and_nums)  #raw features
        self.convs_feat_nums_list = [len(sparse_feat_and_nums)]
        self.convs = nn.ModuleList()

        for i in range(len(con_channels)):
            self.convs.append(
                CNNRec(
                    in_channels=con_channels[i - 1]
                    if i != 0 else 1,  #first channel is 1
                    out_channels=con_channels[i],
                    feat_nums=self.convs_feat_nums_list[-1],
                    con_kernel=con_kernel_sizes[i],
                    pool_kernel=pool_kernel_sizes[i],
                    rec_feat_map=rec_feat_maps[i]))
            cur_rec_feat_nums = self.convs_feat_nums_list[
                -1] // pool_kernel_sizes[i]
            self.inner_feat_nums += cur_rec_feat_nums * rec_feat_maps[
                i]  #new features
            self.convs_feat_nums_list.append(cur_rec_feat_nums)

        #IPNN
        self.Inner = InnerProduct()

        #Deep Classifier
        assert deep_layers[-1] == 1, "last hidden dim must be 1"

        self.deep_input_dim = int(
            self.inner_feat_nums * (self.inner_feat_nums - 1) /
            2) + len(sparse_feat_and_nums) * embed_dim + len(dense_feat)
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='tanh',
                        bn=True)

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
Example #17
    def __init__(self,
                 user_sparse_and_nums,
                 user_dense,
                 item_sparse_and_nums,
                 item_dense,
                 user_embed_dim,
                 target_item_embed_dim,
                 seq_item_embed_dim,
                 deep_layers,
                 max_seq_len,
                 position_embed_dim,
                 attention_hidden_dim,
                 is_share_embed=False,
                 sigmoid_out=False):
        super(DMR, self).__init__()
        self.is_share_embed = is_share_embed

        #user embed
        self.user_feat_num = len(user_sparse_and_nums) * user_embed_dim + len(
            user_dense)
        self.user_embed = SparseEmbeddingLayer(
            feat_and_nums=user_sparse_and_nums, embed_dim=user_embed_dim)
        self.user_dense = DenseFeatCatLayer()

        #item embed
        self.target_item_feat_num = len(
            item_sparse_and_nums) * target_item_embed_dim + len(item_dense)
        self.target_item_embed = SparseEmbeddingLayer(
            feat_and_nums=item_sparse_and_nums,
            embed_dim=target_item_embed_dim)
        self.target_item_dense = DenseFeatCatLayer()
        if not self.is_share_embed:
            self.seq_item_feat_num = len(
                item_sparse_and_nums) * seq_item_embed_dim + len(item_dense)
            self.seq_item_embed = SparseEmbeddingLayer(
                feat_and_nums=item_sparse_and_nums,
                embed_dim=seq_item_embed_dim)
            self.seq_item_dense = DenseFeatCatLayer()
        else:
            assert seq_item_embed_dim == target_item_embed_dim, 'if embeddings are shared, seq_item_embed_dim must equal target_item_embed_dim'
            self.seq_item_feat_num = self.target_item_feat_num
            self.seq_item_embed = self.target_item_embed
            self.seq_item_dense = self.target_item_dense

        #seq position embed
        self.pos_embed = nn.Parameter(
            nn.init.xavier_normal_(torch.empty(max_seq_len,
                                               position_embed_dim)))

        #User-to-Item Network
        self.u2i = User2Item(target_item_feat_dim=target_item_embed_dim,
                             seq_item_feat_dim=seq_item_embed_dim,
                             position_feat_dim=position_embed_dim,
                             attention_hidden_dim=attention_hidden_dim)

        #Item-to-Item Network
        self.i2i = Item2Item(target_item_feat_dim=target_item_embed_dim,
                             seq_item_feat_dim=seq_item_embed_dim,
                             position_feat_dim=position_embed_dim,
                             attention_hidden_dim=attention_hidden_dim)

        #Deep
        assert deep_layers[-1] == 1, "last hidden dim must be 1"
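        #+1 +1: presumably the scalar relevance scores from the u2i and i2i networks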
        self.deep_input_dim = self.user_feat_num + self.target_item_feat_num + self.seq_item_feat_num + 1 + 1
        self.deep = DNN(input_dim=self.deep_input_dim,
                        layers=deep_layers,
                        act='prelu')

        #output
        self.sigmoid_out = sigmoid_out
        if self.sigmoid_out:
            self.sigmoid = nn.Sigmoid()
Example #18
    def __init__(self, sparse_feat_and_nums, dense_feat, embed_dim):
        super(Embedding, self).__init__()
        self.sparse_embed = SparseEmbeddingLayer(
            feat_and_nums=sparse_feat_and_nums, embed_dim=embed_dim)
        self.dense_embed = DenseEmbeddingLayer(dense_feat=dense_feat,
                                               embed_dim=embed_dim)