def __init__(self, field_dims, embed_dim, normalize, sparse_embedding, mlp_dims, method='inner'):
    super().__init__()
    num_fields = len(field_dims)
    # Pairwise interaction layer: inner- or outer-product network over field embeddings.
    if method == 'inner':
        self.pn = InnerProductNetwork()
    elif method == 'outer':
        self.pn = OuterProductNetwork(num_fields, embed_dim)
    else:
        raise ValueError('unknown product type: ' + method)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim, sparse_embedding=sparse_embedding, normalize=normalize)
    self.linear = FeaturesLinear(field_dims, sparse_embedding=sparse_embedding)
    self.embed_output_dim = num_fields * embed_dim
    # MLP input: one product term per field pair plus the flattened embeddings.
    self.mlp = MultiLayerPerceptronV2(num_fields * (num_fields - 1) // 2 + self.embed_output_dim, mlp_dims, normalize="none")
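# Sketch (not in the original file): a forward pass wiring the modules above in
# the usual product-network fashion, following the pytorch-fm convention. Assumes
# self.pn returns one value per field pair, self.linear returns a (batch, 1)
# logit, and the MLP ends in a single output unit.
def forward(self, x):
    embed_x = self.embedding(x)    # (batch, num_fields, embed_dim)
    cross_term = self.pn(embed_x)  # (batch, num_fields * (num_fields - 1) // 2)
    h = torch.cat([embed_x.view(-1, self.embed_output_dim), cross_term], dim=1)
    logit = self.linear(x) + self.mlp(h)
    return torch.sigmoid(logit.squeeze(1))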
def __init__(self, field_dims, embed_dim, normalize, sparse_embedding, attn_size, dropouts):
    super().__init__()
    self.num_fields = len(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim, normalize=normalize, sparse_embedding=sparse_embedding)
    self.linear = FeaturesLinear(field_dims, sparse_embedding=sparse_embedding)
    self.afm = AttentionalFactorizationMachine(embed_dim, attn_size, dropouts)
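# Sketch (assumed, pytorch-fm-style): the AFM logit is the linear term plus the
# attention-weighted pairwise interactions over the field embeddings.
def forward(self, x):
    logit = self.linear(x) + self.afm(self.embedding(x))
    return torch.sigmoid(logit.squeeze(1))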
def __init__(self, field_dims, embed_dim, normalize, sparse_embedding, mlp_dims):
    super().__init__()
    self.linear = FeaturesLinear(field_dims, sparse_embedding=sparse_embedding)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim, sparse_embedding=sparse_embedding, normalize=normalize)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = MultiLayerPerceptronV2(self.embed_output_dim, mlp_dims)
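# Sketch (assumed): wide (linear) and deep (MLP over the flattened embeddings)
# parts summed into a single logit, as in the standard Wide&Deep layout.
def forward(self, x):
    embed_x = self.embedding(x)
    logit = self.linear(x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
    return torch.sigmoid(logit.squeeze(1))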
def __init__(self, field_dims, embed_dim, normalize, sparse_embedding, LNN_dim, mlp_dims, dropouts):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims, sparse_embedding=sparse_embedding)  # Linear
    self.embedding = FeaturesEmbedding(field_dims, embed_dim, normalize=normalize, sparse_embedding=sparse_embedding)  # Embedding
    self.LNN_dim = LNN_dim
    self.LNN_output_dim = self.LNN_dim * embed_dim
    self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
    self.mlp = MultiLayerPerceptron(self.LNN_output_dim, mlp_dims, dropouts[0])
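# Sketch (assumed, AFN-style): the logarithmic neural network (LNN) learns
# adaptive-order cross features from the embeddings, the MLP scores them, and
# the linear term is added before the sigmoid. Assumes self.LNN returns a
# flattened (batch, LNN_dim * embed_dim) tensor matching self.LNN_output_dim.
def forward(self, x):
    embed_x = self.embedding(x)  # (batch, num_fields, embed_dim)
    lnn_out = self.LNN(embed_x)  # (batch, LNN_dim * embed_dim)
    logit = self.linear(x) + self.mlp(lnn_out)
    return torch.sigmoid(logit.squeeze(1))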
def __init__(self, field_dims, embed_dim, atten_embed_dim, num_heads, num_layers, mlp_dims, attention_dropout, mlp_dropout, has_residual=True):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.atten_linear = torch.nn.Linear(embed_dim, atten_embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.atten_output_dim = len(field_dims) * atten_embed_dim
    self.has_residual = has_residual
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, mlp_dropout)
    self.self_attns = torch.nn.ModuleList([
        torch.nn.MultiheadAttention(atten_embed_dim, num_heads, dropout=attention_dropout)
        for _ in range(num_layers)
    ])
    self.attn_fc = torch.nn.Linear(self.atten_output_dim, 1)
    if self.has_residual:
        self.V_res_linear = torch.nn.Linear(embed_dim, atten_embed_dim)
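# Sketch (assumed, AutoInt-style): embeddings are projected into the attention
# space and passed through the stacked self-attention layers. MultiheadAttention
# here expects (seq, batch, dim) input, hence the transposes; the optional
# residual adds a projection of the raw embeddings back in.
def forward(self, x):
    embed_x = self.embedding(x)           # (batch, num_fields, embed_dim)
    atten_x = self.atten_linear(embed_x)  # (batch, num_fields, atten_embed_dim)
    cross_term = atten_x.transpose(0, 1)  # (num_fields, batch, atten_embed_dim)
    for self_attn in self.self_attns:
        cross_term, _ = self_attn(cross_term, cross_term, cross_term)
    cross_term = cross_term.transpose(0, 1)
    if self.has_residual:
        cross_term = cross_term + self.V_res_linear(embed_x)
    cross_term = cross_term.relu().contiguous().view(-1, self.atten_output_dim)
    logit = (self.linear(x) + self.attn_fc(cross_term)
             + self.mlp(embed_x.view(-1, self.embed_output_dim)))
    return torch.sigmoid(logit.squeeze(1))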
def __init__(self, field_dims, embed_dim, normalize, sparse_embedding, mlp_dims, cross_layer_sizes, split_half=True):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim, normalize=normalize, sparse_embedding=sparse_embedding)
    self.linear = FeaturesLinear(field_dims, sparse_embedding=sparse_embedding)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.cin = CompressedInteractionNetwork(len(field_dims), cross_layer_sizes, split_half)
    self.mlp = MultiLayerPerceptronV2(self.embed_output_dim, mlp_dims, output_layer=True)
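# Sketch (assumed, xDeepFM layout): linear part, CIN over the raw field
# embeddings, and MLP over the flattened embeddings are summed into one logit.
def forward(self, x):
    embed_x = self.embedding(x)
    logit = self.linear(x) + self.cin(embed_x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
    return torch.sigmoid(logit.squeeze(1))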
def __init__(
    self,
    field_dims,
    embed_dim,
    normalize,
    sparse_embedding,
    mlp_dims,
    embedding_dropout=0.5,
    hidden_dropout=0.3,
    embedding_bn=True,
    layer_bn=True,
):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim, normalize=normalize, sparse_embedding=sparse_embedding)
    self.linear = FeaturesLinear(field_dims, sparse_embedding=sparse_embedding)
    if embedding_bn:
        self.fm = torch.nn.Sequential(
            FactorizationMachine(reduce_sum=False),
            torch.nn.BatchNorm1d(embed_dim),
            torch.nn.Dropout(embedding_dropout),
        )
    else:
        self.fm = torch.nn.Sequential(
            FactorizationMachine(reduce_sum=False),
            torch.nn.Dropout(embedding_dropout),
        )
    self.mlp = MultiLayerPerceptronV2(
        embed_dim,
        mlp_dims,
        output_layer=True,
        dropout=hidden_dropout,
        normalize="bn" if layer_bn else "none",
    )
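# Sketch (assumed, NFM-style): the bi-interaction block (self.fm with
# reduce_sum=False) yields an embed_dim-sized cross vector, which the MLP maps
# to a logit; the linear term is added before the sigmoid.
def forward(self, x):
    cross_term = self.fm(self.embedding(x))  # (batch, embed_dim)
    logit = self.linear(x) + self.mlp(cross_term)
    return torch.sigmoid(logit.squeeze(1))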