Example 1
    def __init__(self,
                 emb_dim,
                 num_feats,
                 num_fields,
                 fc_dims=None,
                 dropout=None,
                 batch_norm=None,
                 out_type='binary'):
        super(NFM, self).__init__()
        self.emb_dim = emb_dim
        self.num_feats = num_feats
        self.num_fields = num_fields

        self.first_order_weights = nn.Embedding(num_embeddings=num_feats,
                                                embedding_dim=1)
        nn.init.xavier_uniform_(self.first_order_weights.weight)
        self.first_order_bias = nn.Parameter(torch.randn(1))

        self.emb_layer = nn.Embedding(num_embeddings=num_feats,
                                      embedding_dim=emb_dim)
        nn.init.xavier_uniform_(self.emb_layer.weight)

        self.bi_intaraction_layer = BiInteractionLayer()
        if not fc_dims:
            fc_dims = [32, 32]
        self.fc_dims = fc_dims
        self.fc_layers = MLP(emb_dim, fc_dims, dropout, batch_norm)

        self.h = nn.Parameter(torch.zeros(1, fc_dims[-1]))  # 1 * fc_dims[-1]
        nn.init.xavier_uniform_(self.h.data)
        self.output_layer = OutputLayer(in_dim=1, out_type=out_type)
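
BiInteractionLayer and the projection vector h are only declared above; the following is a minimal sketch of how they are presumably combined in the forward pass. The function name, feat_index and feat_value are illustrative and not part of the original code.

import torch

def nfm_score_sketch(model, feat_index, feat_value):
    # feat_index: batch * num_fields (LongTensor), feat_value: batch * num_fields
    emb = model.emb_layer(feat_index) * feat_value.unsqueeze(-1)   # batch * num_fields * emb_dim
    # bi-interaction pooling: 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) -> batch * emb_dim
    pooled = 0.5 * (emb.sum(dim=1) ** 2 - (emb ** 2).sum(dim=1))
    hidden = model.fc_layers(pooled)                               # batch * fc_dims[-1]
    first_order = (model.first_order_weights(feat_index).squeeze(-1) * feat_value).sum(dim=1)
    # h projects the last hidden layer to a scalar, which is why in_dim=1 above
    score = first_order + model.first_order_bias + (hidden * model.h).sum(dim=1)
    return model.output_layer(score.unsqueeze(-1))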
Example 2
    def __init__(self,
                 emb_dim,
                 feat_dim,
                 num_fields,
                 fc_dims=None,
                 dropout=None,
                 batch_norm=None,
                 out_type='binary',
                 train_fm=True):
        super(FNN, self).__init__()
        # set model object to training FNN or training FM embedding
        self.fm_trained = not train_fm

        # embedding layer is embedded in the FM sub-module
        self.emb_dim = emb_dim

        # fc layers
        if not fc_dims:
            fc_dims = [32, 32]
        self.fc_dims = fc_dims
        self.num_fields = num_fields
        self.fc_layers = MLP(emb_dim * num_fields, fc_dims, dropout,
                             batch_norm)

        # fm model as the pre-trained embedding layer
        self.fm = FM(emb_dim, feat_dim, out_type)

        # output
        self.output_layer = OutputLayer(fc_dims[-1], out_type)
Example 3
    def __init__(self, emb_dim, num_feats, num_cate_fields, num_cont_fields, num_cross_feats, fc_dims=None,
                 dropout=None, batch_norm=None, out_type='binary'):
        super(WideAndDeep, self).__init__()
        self.emb_dim = emb_dim
        self.num_feats = num_feats
        self.num_cate_fields = num_cate_fields
        self.num_cont_fields = num_cont_fields
        self.num_cross_feats = num_cross_feats

        # first order weight for category features
        self.cate_weights = nn.Embedding(num_embeddings=num_feats - num_cont_fields, embedding_dim=1)
        nn.init.xavier_uniform_(self.cate_weights.weight)

        # first order weight for continuous features
        self.cont_weights = nn.Linear(in_features=num_cont_fields, out_features=1)
        nn.init.xavier_uniform_(self.cont_weights.weight)

        self.wide_bias = nn.Parameter(torch.randn(1))

        if not fc_dims:
            fc_dims = [32, 32]
        fc_dims.append(1)
        self.fc_dims = fc_dims

        # embedding for deep network
        self.emb_layer = nn.Embedding(num_embeddings=num_feats - num_cont_fields, embedding_dim=emb_dim)
        nn.init.xavier_uniform_(self.emb_layer.weight)

        self.deep = MLP(num_cont_fields + num_cate_fields * emb_dim, fc_dims, dropout, batch_norm)
        self.out_layer = OutputLayer(in_dim=1, out_type=out_type)
Example 4
 def __init__(self, emb_dim, feat_dim, out_type='binary'):
     super(FM, self).__init__()
     self.emb_dim = emb_dim
     self.emb_layer = nn.Embedding(num_embeddings=feat_dim,
                                   embedding_dim=emb_dim)
     nn.init.xavier_uniform_(self.emb_layer.weight)
     self.bias = nn.Parameter(torch.randn(1))
     self.first_order_weights = nn.Embedding(num_embeddings=feat_dim,
                                             embedding_dim=1)
     nn.init.xavier_uniform_(self.first_order_weights.weight)
     self.output_layer = OutputLayer(1, out_type)
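
A minimal sketch of the standard FM scoring these parameters support, using the "square of sum minus sum of squares" identity for the pairwise term; the function name and inputs are illustrative.

import torch

def fm_forward_sketch(fm, feat_index, feat_value):
    # first-order term: sum_i w_i * x_i, plus the global bias
    first_order = (fm.first_order_weights(feat_index).squeeze(-1) * feat_value).sum(dim=1)
    # second-order term: 0.5 * sum_k ((sum_i v_ik x_i)^2 - sum_i (v_ik x_i)^2)
    emb = fm.emb_layer(feat_index) * feat_value.unsqueeze(-1)      # batch * num_fields * emb_dim
    second_order = 0.5 * (emb.sum(dim=1) ** 2 - (emb ** 2).sum(dim=1)).sum(dim=1)
    logits = fm.bias + first_order + second_order                  # batch
    return fm.output_layer(logits.unsqueeze(-1))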
Example 5
 def __init__(self, emb_dim, feat_dim, num_fields, fc_dims=None, dropout=None, batch_norm=None, out_type='binary'):
     super(NFM, self).__init__()
     self.emb_dim = emb_dim
     self.feat_dim = feat_dim
     self.num_fields = num_fields
     self.emb_layer = nn.Embedding(num_embeddings=feat_dim, embedding_dim=emb_dim)
     self.bi_intaraction_layer = BiInteractionLayer()
     if not fc_dims:
         fc_dims = [32, 32]
     self.fc_dims = fc_dims
     self.fc_layers = MLP(emb_dim, fc_dims, dropout, batch_norm)
     self.output_layer = OutputLayer(in_dim=fc_dims[-1], out_type=out_type)
Example 6
 def __init__(self, emb_dim, feat_dim, num_cate_fields, num_cont_fields, num_cross_feats, fc_dims=None, dropout=None,
              batch_norm=None, out_type='binary'):
     super(WideAndDeep, self).__init__()
     self.emb_dim = emb_dim
     self.feat_dim = feat_dim
     self.num_cate_fields = num_cate_fields
     self.num_cont_fields = num_cont_fields
     self.num_cross_feats = num_cross_feats
     if not fc_dims:
         fc_dims = [32, 32]
     self.emb_layer = nn.Embedding(num_embeddings=feat_dim - num_cont_fields, embedding_dim=emb_dim)
     self.deep = MLP(num_cont_fields + num_cate_fields * emb_dim, fc_dims, dropout, batch_norm)
     self.wide = LR(num_cross_feats, out_type='regression')
     self.out_layer = OutputLayer(in_dim=fc_dims[-1] + 1, out_type=out_type)
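
A minimal sketch of how the wide LR over cross features and the deep MLP are presumably joined before out_layer; the input names and the LR call signature are assumptions.

import torch

def wide_deep_forward_sketch(model, cate_index, cont_value, cross_index, cross_value):
    # deep branch: raw continuous values concatenated with flattened categorical embeddings
    deep_in = torch.cat([cont_value,
                         model.emb_layer(cate_index).flatten(start_dim=1)], dim=1)
    y_deep = model.deep(deep_in)                     # batch * fc_dims[-1]
    # wide branch: LR over the cross features, 'regression' keeps it a raw linear score
    y_wide = model.wide(cross_index, cross_value)    # assumed batch * 1
    return model.out_layer(torch.cat([y_deep, y_wide], dim=1))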
Example 7
    def __init__(self,
                 emb_dim,
                 num_feats,
                 num_categories,
                 field_ranges,
                 fc_dims=None,
                 dropout=None,
                 batch_norm=None,
                 out_type='binary'):
        super(FLEN, self).__init__()
        self.num_feats = num_feats
        self.emb_dim = emb_dim
        self.num_categories = num_categories
        if not field_ranges:
            field_ranges = torch.tensor(range(num_categories))
        self.field_ranges = field_ranges
        self.num_fields = len(field_ranges)

        # embedding layer
        self.emb_layer = nn.Embedding(num_embeddings=num_feats,
                                      embedding_dim=emb_dim)
        nn.init.xavier_uniform_(self.emb_layer.weight)

        # S part
        self.first_order_weights = nn.Embedding(num_embeddings=num_categories,
                                                embedding_dim=1)
        nn.init.xavier_uniform_(self.first_order_weights.weight)
        self.first_order_bias = nn.Parameter(torch.randn(1))

        # MF part
        self.num_pairs = self.num_fields * (self.num_fields - 1) // 2  # integer count of field pairs
        self.r_mf = nn.Parameter(torch.zeros(self.num_pairs,
                                             1))  # num_pairs * 1
        nn.init.xavier_uniform_(self.r_mf.data)

        # FM part
        self.r_fm = nn.Parameter(torch.zeros(self.num_fields,
                                             1))  # num_fields * 1
        nn.init.xavier_uniform_(self.r_fm.data)

        # dnn
        if not fc_dims:
            fc_dims = [32, 32, 32]
        self.fc_dims = fc_dims
        # MLP expects (in_dim, fc_dims, dropout, batch_norm) as in the other models;
        # the in_dim below assumes the DNN consumes the num_fields field-wise
        # embedding sums concatenated into a single vector
        self.fc_layers = MLP(self.num_fields * emb_dim, fc_dims, dropout, batch_norm)

        self.output_layer = OutputLayer(fc_dims[-1] + 1 + self.emb_dim,
                                        out_type)
Example 8
    def __init__(self,
                 emb_dim,
                 feat_dim,
                 num_fields,
                 fc_dims=None,
                 dropout=None,
                 batch_norm=None,
                 product_type='inner',
                 out_type='binary'):
        super(PNN, self).__init__()
        # embedding layer
        self.emb_dim = emb_dim
        self.feat_dim = feat_dim
        self.num_fields = num_fields
        self.emb_layer = nn.Embedding(num_embeddings=self.feat_dim,
                                      embedding_dim=self.emb_dim)
        nn.init.xavier_uniform_(self.emb_layer.weight)

        # linear signal layer, named l_z
        if not fc_dims:
            fc_dims = [32, 32]
        self.d1 = d1 = fc_dims[0]
        self.product_type = product_type
        if product_type == '*':
            d1 *= 2
        self.linear_signal_weights = nn.Linear(in_features=num_fields * emb_dim,
                                               out_features=d1)
        nn.init.xavier_uniform_(self.linear_signal_weights.weight)

        # product layer, named l_p
        if product_type == 'inner':
            self.product_layer = InnerProductLayer(num_fields, d1)
        elif product_type == 'outer':
            self.product_layer = OuterProductLayer(emb_dim, num_fields, d1)
        else:
            self.product_layer = HybridProductLayer(emb_dim, num_fields, d1)

        # fc layers
        # l_1=relu(l_z+l_p_b_1)
        self.l1_layer = nn.ReLU()
        self.l1_bias = nn.Parameter(torch.randn(d1))
        # l_2 to l_n
        self.fc_dims = fc_dims
        self.fc_layers = MLP(d1, self.fc_dims, dropout, batch_norm)

        # output layer
        self.output_layer = OutputLayer(fc_dims[-1], out_type)
Example 9
    def __init__(self, emb_dim, num_feats, num_fields, att_weight_dim, out_type='binary'):
        super(AFM, self).__init__()
        self.emb_dim = emb_dim
        self.num_feats = num_feats
        self.num_fields = num_fields
        self.att_weight_dim = att_weight_dim
        self.first_order_weights = nn.Embedding(num_embeddings=num_feats, embedding_dim=1)
        nn.init.xavier_uniform_(self.first_order_weights.weight)
        self.bias = nn.Parameter(torch.randn(1))
        self.emb_layer = nn.Embedding(num_embeddings=num_feats, embedding_dim=emb_dim)
        nn.init.xavier_uniform_(self.emb_layer.weight)
        self.num_pairs = num_fields * (num_fields - 1) // 2  # integer count of field pairs

        self.att_pooling_layer = AttentionPairWiseInteractionLayer(self.num_pairs, emb_dim, att_weight_dim)

        self.output_layer = OutputLayer(1, out_type)
Example 10
    def __init__(self,
                 emb_dim,
                 projection_dim,
                 num_heads,
                 num_feats,
                 num_fields,
                 use_res=True,
                 out_type='binary'):
        super(AutoInt, self).__init__()
        self.emb_dim = emb_dim
        self.projection_dim = projection_dim
        self.num_heads = num_heads
        self.num_feats = num_feats
        self.num_fields = num_fields

        self.emb_layer = nn.Embedding(num_embeddings=num_feats,
                                      embedding_dim=emb_dim)
        nn.init.xavier_uniform_(self.emb_layer.weight)

        self.query_weights = nn.Parameter(
            torch.zeros(emb_dim, projection_dim * num_heads))
        nn.init.xavier_uniform_(self.query_weights.data)

        self.key_weights = nn.Parameter(
            torch.zeros(emb_dim, projection_dim * num_heads))
        nn.init.xavier_uniform_(self.key_weights.data)

        self.value_weights = nn.Parameter(
            torch.zeros(emb_dim, projection_dim * num_heads))
        nn.init.xavier_uniform_(self.value_weights.data)

        self.use_res = use_res
        if use_res:
            self.res_weights = nn.Parameter(
                torch.zeros(emb_dim, projection_dim * num_heads))
            nn.init.xavier_uniform_(self.res_weights.data)

        self.output_layer = OutputLayer(in_dim=num_fields * num_heads * projection_dim,
                                        out_type=out_type)
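
A minimal sketch of the interacting layer these projection matrices describe, i.e. multi-head self-attention over the field embeddings; the helper names are illustrative.

import torch

def autoint_attention_sketch(model, feat_index):
    emb = model.emb_layer(feat_index)                       # batch * num_fields * emb_dim
    b, f, _ = emb.shape

    def split_heads(x):                                     # -> batch * heads * fields * proj
        return x.view(b, f, model.num_heads, model.projection_dim).transpose(1, 2)

    q = split_heads(emb @ model.query_weights)
    k = split_heads(emb @ model.key_weights)
    v = split_heads(emb @ model.value_weights)
    att = torch.softmax(q @ k.transpose(-1, -2), dim=-1)    # batch * heads * fields * fields
    out = att @ v                                           # batch * heads * fields * proj
    if model.use_res:
        out = out + split_heads(emb @ model.res_weights)    # residual projection of the raw embeddings
    out = torch.relu(out).transpose(1, 2).reshape(b, -1)    # batch * (fields * heads * proj)
    return model.output_layer(out)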
Example 11
    def __init__(self,
                 emb_dim,
                 num_feats,
                 num_cate_fields,
                 num_cont_fields,
                 cross_depth,
                 fc_dims=None,
                 dropout=None,
                 batch_norm=None,
                 out_type='binary'):
        super(DCN, self).__init__()
        self.emb_dim = emb_dim
        self.num_feats = num_feats
        self.num_cate_fields = num_cate_fields
        self.num_cont_fields = num_cont_fields

        self.cross_depth = cross_depth
        # embedding for category features
        self.emb_layer = nn.Embedding(num_embeddings=num_feats -
                                      num_cont_fields,
                                      embedding_dim=emb_dim)
        nn.init.xavier_uniform_(self.emb_layer.weight)

        # deep network
        if not fc_dims:
            fc_dims = [32, 32]
        self.fc_dims = fc_dims
        x0_dim = num_cont_fields + num_cate_fields * emb_dim
        self.deep = MLP(x0_dim, fc_dims, dropout, batch_norm)

        # cross network
        cross_layers = []
        for _ in range(cross_depth):
            cross_layers.append(CrossLayer(x0_dim))
        self.cross = nn.ModuleList(cross_layers)

        self.out_layer = OutputLayer(in_dim=fc_dims[-1] + x0_dim,
                                     out_type=out_type)
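
CrossLayer itself is not shown; a minimal sketch of the standard DCN cross update x_{l+1} = x_0 * (x_l . w) + b + x_l that each stacked layer is assumed to implement.

import torch
import torch.nn as nn

class CrossLayerSketch(nn.Module):
    def __init__(self, x0_dim):
        super(CrossLayerSketch, self).__init__()
        self.weight = nn.Linear(in_features=x0_dim, out_features=1, bias=False)  # w: x0_dim -> scalar
        self.bias = nn.Parameter(torch.zeros(x0_dim))

    def forward(self, x0, xl):                # both: batch * x0_dim
        # x_{l+1} = x_0 * (x_l . w) + b + x_l; the residual keeps the input dimension
        return x0 * self.weight(xl) + self.bias + xl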
Example 12
    def __init__(self,
                 emb_dim,
                 feat_dim,
                 num_fields,
                 fc_dims=None,
                 dropout=None,
                 batch_norm=None,
                 out_type='binary'):
        super(DeepFM, self).__init__()
        # embedding layer is embedded in the FM sub-module
        self.emb_dim = emb_dim

        # fm
        self.fm = FM(emb_dim, feat_dim, out_type='regression')

        # dnn
        if not fc_dims:
            fc_dims = [32, 32, 32]
        self.fc_dims = fc_dims
        self.num_fields = num_fields
        self.dnn = MLP(emb_dim * num_fields, fc_dims, dropout, batch_norm)

        # output
        self.output_layer = OutputLayer(fc_dims[-1] + 1, out_type)
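
A minimal sketch of how the two branches are presumably combined, assuming the FM sub-module returns a batch * 1 'regression' score and exposes its embedding layer to feed the DNN; names are illustrative.

import torch

def deepfm_forward_sketch(model, feat_index, feat_value):
    y_fm = model.fm(feat_index, feat_value)                       # assumed batch * 1
    emb = model.fm.emb_layer(feat_index).flatten(start_dim=1)     # batch * (num_fields * emb_dim)
    y_dnn = model.dnn(emb)                                        # batch * fc_dims[-1]
    return model.output_layer(torch.cat([y_fm, y_dnn], dim=1))    # matches in_dim = fc_dims[-1] + 1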
Example 13
 def __init__(self, num_feats, out_type='binary'):
     super(LR, self).__init__()
     self.num_feats = num_feats
     self.weights = nn.Embedding(num_embeddings=num_feats, embedding_dim=1)
     self.bias = nn.Parameter(torch.randn(1))
     self.output_layer = OutputLayer(1, out_type)
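
A minimal sketch of the corresponding forward pass: look up the per-feature weights, weight them by the feature values, sum, and add the bias before the output layer. Input names are illustrative.

import torch

def lr_forward_sketch(lr, feat_index, feat_value):
    # feat_index: batch * num_fields (LongTensor), feat_value: batch * num_fields
    logits = (lr.weights(feat_index).squeeze(-1) * feat_value).sum(dim=1) + lr.bias
    return lr.output_layer(logits.unsqueeze(-1))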