def __init__(
    self,
    field_dims: List[int],
    embed_dim: int,
    cross_layer_sizes: List[int],
    mlp_dims: Union[int, List[int]],
    dropout: Optional[Union[float, List[float]]] = 0.0,
    split_half: bool = True,
):
    super().__init__()
    self.linear = LinearEmbedder(field_dims, 1)
    self.embedding = Embedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.cin = CompressedInteractionNetwork(
        len(field_dims), cross_layer_sizes, split_half
    )
    if isinstance(mlp_dims, int):
        mlp_layers = 1
        last_mlp_dim = mlp_dims
    else:
        mlp_layers = len(mlp_dims)
        last_mlp_dim = mlp_dims[-1]
    self.mlp = FeedForward(
        num_layers=mlp_layers,
        input_dim=self.embed_output_dim,
        hidden_dims=mlp_dims,
        batch_norm=True,
        activations=torch.nn.ReLU(),
        dropouts=dropout,
    )
    # The output layer is kept separate because it must not have batch norm.
    self.output_layer = torch.nn.Linear(last_mlp_dim, 1)
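
# Hedged sketch of a matching forward pass (not in the original source).
# Assumes x is a LongTensor of shape (batch, num_fields), Embedding returns
# (batch, num_fields, embed_dim), and LinearEmbedder and the CIN each
# return (batch, 1).
def forward(self, x):
    embed_x = self.embedding(x)
    mlp_out = self.output_layer(self.mlp(embed_x.view(-1, self.embed_output_dim)))
    # xDeepFM-style combination: sum the linear, CIN, and MLP logits.
    logit = self.linear(x) + self.cin(embed_x) + mlp_out
    return torch.sigmoid(logit.squeeze(1))
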
def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.linear = LinearEmbedder(field_dims, 1)
    self.embedding = Embedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    # Two-layer head: one hidden layer of size mlp_dims, then a 1-unit linear output.
    self.mlp = FeedForward(
        num_layers=2,
        input_dim=self.embed_output_dim,
        hidden_dims=[mlp_dims, 1],
        batch_norm=True,
        activations=['relu', 'linear'],
        dropouts=[dropout, 0],
    )
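
# Hedged forward sketch under the same shape assumptions as above; since the
# MLP already ends in a 1-unit linear layer, its output is the deep logit.
def forward(self, x):
    embed_x = self.embedding(x)
    logit = self.linear(x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
    return torch.sigmoid(logit.squeeze(1))
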
def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.linear = LinearEmbedder(field_dims, 1)
    self.fm = FactorizationMachine(reduce_sum=True)
    self.embedding = Embedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = torch.nn.Sequential(
        FeedForward(
            num_layers=len(mlp_dims),
            input_dim=self.embed_output_dim,
            hidden_dims=mlp_dims,
            batch_norm=True,
            activations=torch.nn.ReLU(),
            dropouts=dropout,
        ),
        torch.nn.Linear(mlp_dims[-1], 1),
    )
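
# Hedged forward sketch (not in the original source): DeepFM-style sum of the
# linear, FM, and deep logits; assumes fm(embed_x) returns (batch, 1).
def forward(self, x):
    embed_x = self.embedding(x)
    logit = (self.linear(x) + self.fm(embed_x)
             + self.mlp(embed_x.view(-1, self.embed_output_dim)))
    return torch.sigmoid(logit.squeeze(1))
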
def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
    super().__init__()
    self.embedding = Embedding(field_dims, embed_dim)
    self.linear = LinearEmbedder(field_dims, 1)
    # Bi-interaction pooling: FM without the final sum, then batch norm and dropout.
    self.fm = torch.nn.Sequential(
        FactorizationMachine(reduce_sum=False),
        torch.nn.BatchNorm1d(embed_dim),
        torch.nn.Dropout(dropouts[0]),
    )
    self.mlp = torch.nn.Sequential(
        FeedForward(
            num_layers=len(mlp_dims),
            input_dim=embed_dim,
            hidden_dims=mlp_dims,
            batch_norm=True,
            activations=torch.nn.ReLU(),
            dropouts=dropouts[1],
        ),
        torch.nn.Linear(mlp_dims[-1], 1),
    )
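
# Hedged forward sketch (not in the original source): with reduce_sum=False
# the FM block is assumed to return a (batch, embed_dim) bi-interaction
# vector, which feeds the MLP head.
def forward(self, x):
    cross_term = self.fm(self.embedding(x))
    logit = self.linear(x) + self.mlp(cross_term)
    return torch.sigmoid(logit.squeeze(1))
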
def __init__(self, field_dims, embed_dim, num_layers, mlp_dims, dropout):
    super().__init__()
    self.embedding = Embedding(field_dims, embed_dim)
    self.linear = LinearEmbedder(field_dims, 1)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.cn = CrossNetwork(self.embed_output_dim, num_layers)
    self.cn_output = torch.nn.Linear(self.embed_output_dim, 1)
    if isinstance(mlp_dims, int):
        mlp_layers = 1
        last_mlp_dim = mlp_dims
    else:
        mlp_layers = len(mlp_dims)
        last_mlp_dim = mlp_dims[-1]
    self.mlp = torch.nn.Sequential(
        FeedForward(
            num_layers=mlp_layers,
            input_dim=self.embed_output_dim,
            hidden_dims=mlp_dims,
            batch_norm=True,
            activations=torch.nn.ReLU(),
            dropouts=dropout,
        ),
        torch.nn.Linear(last_mlp_dim, 1),
    )
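
# Hedged forward sketch (not in the original source): the cross network and
# the MLP run in parallel on the flattened embeddings, and their logits are
# summed with the linear term.
def forward(self, x):
    embed_x = self.embedding(x).view(-1, self.embed_output_dim)
    logit = self.linear(x) + self.cn_output(self.cn(embed_x)) + self.mlp(embed_x)
    return torch.sigmoid(logit.squeeze(1))
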
def __init__(self, field_dims, embed_dim, num_heads, num_layers, mlp_dims, dropouts):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = LinearEmbedder(field_dims, 1)
    self.embedding = Embedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.self_attention = torch.nn.ModuleList([
        torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=dropouts[0])
        for _ in range(num_layers)
    ])
    self.mlp = FeedForward(
        num_layers=len(mlp_dims),
        input_dim=self.embed_output_dim,
        hidden_dims=mlp_dims,
        batch_norm=True,
        activations=torch.nn.ReLU(),
        dropouts=dropouts[1],
    )
    self.output_linear = torch.nn.Linear(mlp_dims[-1], 1)
    self.attention_linear = torch.nn.Linear(self.embed_output_dim, 1)
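
# Hedged forward sketch (not in the original source). torch.nn.MultiheadAttention
# defaults to (seq_len, batch, embed_dim) inputs, so the field axis is moved to
# the front before the attention stack and back afterwards.
def forward(self, x):
    embed_x = self.embedding(x)
    attn_x = embed_x.transpose(0, 1)
    for layer in self.self_attention:
        attn_x, _ = layer(attn_x, attn_x, attn_x)
    attn_x = attn_x.transpose(0, 1).reshape(-1, self.embed_output_dim)
    logit = (self.linear(x) + self.attention_linear(attn_x)
             + self.output_linear(self.mlp(embed_x.view(-1, self.embed_output_dim))))
    return torch.sigmoid(logit.squeeze(1))
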
def __init__(self, field_dims, embed_dim, mlp_dims, dropout, method='inner'):
    super().__init__()
    num_fields = len(field_dims)
    if method == 'inner':
        self.pn = InnerProductNetwork()
    elif method == 'outer':
        self.pn = OuterProductNetwork(num_fields, embed_dim)
    else:
        raise ValueError('unknown product type: ' + method)
    self.embedding = Embedding(field_dims, embed_dim)
    self.linear = LinearEmbedder(field_dims, embed_dim)
    self.embed_output_dim = num_fields * embed_dim
    # MLP input: one product term per field pair plus the flattened embeddings.
    self.mlp = FeedForward(
        num_layers=2,
        input_dim=num_fields * (num_fields - 1) // 2 + self.embed_output_dim,
        hidden_dims=[mlp_dims, 1],
        batch_norm=True,
        activations=['relu', 'linear'],
        dropouts=[dropout, 0],
    )
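
# Hedged forward sketch (not in the original source): the product network is
# assumed to emit one value per field pair, matching the MLP input dim above.
def forward(self, x):
    embed_x = self.embedding(x)
    cross_term = self.pn(embed_x)
    h = torch.cat([embed_x.view(-1, self.embed_output_dim), cross_term], dim=1)
    return torch.sigmoid(self.mlp(h).squeeze(1))
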
def __init__(self, field_dims, embed_dim):
    super().__init__()
    self.embedding = Embedding(field_dims, embed_dim)
    self.linear = LinearEmbedder(field_dims, 1)
    self.fm = FactorizationMachine(reduce_sum=True)
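
# Hedged forward sketch (not in the original source): a plain factorization
# machine, i.e. the first-order linear term plus the second-order FM term.
def forward(self, x):
    logit = self.linear(x) + self.fm(self.embedding(x))
    return torch.sigmoid(logit.squeeze(1))
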
def __init__(self, field_dims, embed_dim, attn_size, dropouts):
    super().__init__()
    self.num_fields = len(field_dims)
    self.embedding = Embedding(field_dims, embed_dim)
    self.linear = LinearEmbedder(field_dims, 1)
    self.afm = AttentionalFactorizationLayer(embed_dim, attn_size, dropouts)
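
# Hedged forward sketch (not in the original source): the attentional FM
# layer is assumed to return a (batch, 1) interaction logit.
def forward(self, x):
    logit = self.linear(x) + self.afm(self.embedding(x))
    return torch.sigmoid(logit.squeeze(1))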