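# Imports assumed by the snippets below. The feature layers presumably come from
# a local module, as in pytorch-fm-style repositories; the exact import path is
# not visible in these fragments, so the commented line is a hypothetical guess.
import numpy as np
import torch
import torch.nn.functional as F
# from layers import FeaturesLinear, FeaturesEmbedding, FactorizationMachine, ...  # hypothetical path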
# Plain factorization machine: a first-order linear term plus factorized
# second-order interactions over shared field embeddings.
def __init__(self, field_dims, embed_dim):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims)
    self.fm = FactorizationMachine(reduce_sum=True)
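# A minimal forward sketch for the FM __init__ above, an assumption modeled on
# pytorch-fm-style implementations (not part of the original fragment):
def forward(self, x):
    # Score = first-order linear term + second-order factorized interactions.
    x = self.linear(x) + self.fm(self.embedding(x))
    return torch.sigmoid(x.squeeze(1))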
# AutoInt-style model: stacked multi-head self-attention over projected field
# embeddings, combined with a linear part and an MLP.
def __init__(self, field_dims, embed_dim, atten_embed_dim, num_heads,
             num_layers, mlp_dims, dropouts, has_residual=True):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.atten_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.atten_output_dim = len(field_dims) * atten_embed_dim
    self.has_residual = has_residual
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropouts[1])
    self.self_attns = torch.nn.ModuleList([
        torch.nn.MultiheadAttention(atten_embed_dim, num_heads, dropout=dropouts[0])
        for _ in range(num_layers)
    ])
    self.attn_fc = torch.nn.Linear(self.atten_output_dim, 1)
    if self.has_residual:
        self.V_res_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
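# A forward sketch matching the AutoInt modules above; an assumption based on
# the common pytorch-fm-style wiring. torch.nn.MultiheadAttention defaults to
# (seq, batch, dim) layout, hence the transposes.
def forward(self, x):
    embed_x = self.embedding(x)                # (batch, fields, embed_dim)
    atten_x = self.atten_embedding(embed_x)    # project to attention space
    cross_term = atten_x.transpose(0, 1)       # (fields, batch, atten_embed_dim)
    for self_attn in self.self_attns:
        cross_term, _ = self_attn(cross_term, cross_term, cross_term)
    cross_term = cross_term.transpose(0, 1)
    if self.has_residual:
        cross_term = cross_term + self.V_res_embedding(embed_x)
    cross_term = F.relu(cross_term).contiguous().view(-1, self.atten_output_dim)
    x = self.linear(x) + self.attn_fc(cross_term) \
        + self.mlp(embed_x.view(-1, self.embed_output_dim))
    return torch.sigmoid(x.squeeze(1))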
# DeepFM: shared embeddings feed both an FM interaction term and a deep MLP.
def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.fm = FactorizationMachine(reduce_sum=True)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
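# A forward sketch for the DeepFM modules above (an assumption; the shared
# embedding feeds both the FM term and the flattened MLP input):
def forward(self, x):
    embed_x = self.embedding(x)
    x = self.linear(x) + self.fm(embed_x) \
        + self.mlp(embed_x.view(-1, self.embed_output_dim))
    return torch.sigmoid(x.squeeze(1))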
# NFM: the bi-interaction term (FM without sum reduction) is batch-normalized
# and dropped out, then refined by an MLP.
def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims)
    self.fm = torch.nn.Sequential(
        FactorizationMachine(reduce_sum=False),
        torch.nn.BatchNorm1d(embed_dim),
        torch.nn.Dropout(dropouts[0]),
    )
    self.mlp = MultiLayerPerceptron(embed_dim, mlp_dims, dropouts[1])
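# A forward sketch for the NFM modules above (an assumption: the un-summed FM
# output, already normalized and dropped out by the Sequential, feeds the MLP):
def forward(self, x):
    cross_term = self.fm(self.embedding(x))
    x = self.linear(x) + self.mlp(cross_term)
    return torch.sigmoid(x.squeeze(1))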
# DCN-style model: a cross network and an MLP run in parallel over the
# flattened embeddings, alongside a linear part.
def __init__(self, field_dims, embed_dim, num_layers, mlp_dims, dropout):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.cn = CrossNetwork(self.embed_output_dim, num_layers)
    self.cn_output = torch.nn.Linear(self.embed_output_dim, 1)
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
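# One plausible forward wiring for the DCN variant above; hedged, since this
# variant differs from the stock pytorch-fm DCN (which concatenates the cross
# and MLP outputs instead of summing separate heads):
def forward(self, x):
    embed_x = self.embedding(x).view(-1, self.embed_output_dim)
    x = self.linear(x) + self.cn_output(self.cn(embed_x)) + self.mlp(embed_x)
    return torch.sigmoid(x.squeeze(1))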
# Wide-and-deep-style model: a linear (wide) part plus an MLP (deep) part over
# the flattened embeddings.
def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
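# A forward sketch for the wide-and-deep modules above (an assumption):
def forward(self, x):
    embed_x = self.embedding(x)
    x = self.linear(x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
    return torch.sigmoid(x.squeeze(1))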
# AFM: pairwise embedding interactions are re-weighted by an attention network
# before being reduced to a score.
def __init__(self, field_dims, embed_dim, attn_size, dropouts):
    super().__init__()
    self.num_fields = len(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims)
    self.afm = AttentionalFactorizationMachine(embed_dim, attn_size, dropouts)
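# A forward sketch for the AFM modules above (an assumption):
def forward(self, x):
    x = self.linear(x) + self.afm(self.embedding(x))
    return torch.sigmoid(x.squeeze(1))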
# AFN-style model: a logarithmic neural network (LNN) learns adaptive-order
# feature interactions, followed by an MLP.
def __init__(self, field_dims, embed_dim, LNN_dim, mlp_dims, dropouts):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)  # Linear
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # Embedding
    self.LNN_dim = LNN_dim
    self.LNN_output_dim = self.LNN_dim * embed_dim
    self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
    self.mlp = MultiLayerPerceptron(self.LNN_output_dim, mlp_dims, dropouts[0])
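# A forward sketch for the AFN modules above (an assumption; the LNN output is
# expected to be flat with width LNN_output_dim before entering the MLP):
def forward(self, x):
    embed_x = self.embedding(x)
    lnn_out = self.LNN(embed_x)
    x = self.linear(x) + self.mlp(lnn_out)
    return torch.sigmoid(x.squeeze(1))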
# Field-aware NFM: FFM pairwise interactions are batch-normalized and dropped
# out, then fed to an MLP.
def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)
    self.ffm_output_dim = len(field_dims) * (len(field_dims) - 1) // 2 * embed_dim
    self.bn = torch.nn.BatchNorm1d(self.ffm_output_dim)
    self.dropout = torch.nn.Dropout(dropouts[0])
    self.mlp = MultiLayerPerceptron(self.ffm_output_dim, mlp_dims, dropouts[1])
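# A forward sketch for the field-aware NFM modules above (an assumption):
def forward(self, x):
    cross_term = self.ffm(x).view(-1, self.ffm_output_dim)
    cross_term = self.dropout(self.bn(cross_term))
    x = self.linear(x) + self.mlp(cross_term)
    return torch.sigmoid(x.squeeze(1))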
# PNN: inner- or outer-product interactions are concatenated with the flattened
# embeddings and fed to an MLP.
def __init__(self, field_dims, embed_dim, mlp_dims, dropout, method='inner'):
    super().__init__()
    num_fields = len(field_dims)
    if method == 'inner':
        self.pn = InnerProductNetwork()
    elif method == 'outer':
        self.pn = OuterProductNetwork(num_fields, embed_dim)
    else:
        raise ValueError('unknown product type: ' + method)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims, embed_dim)
    self.embed_output_dim = num_fields * embed_dim
    self.mlp = MultiLayerPerceptron(
        num_fields * (num_fields - 1) // 2 + self.embed_output_dim, mlp_dims, dropout)
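# A forward sketch for the PNN modules above (an assumption; note that in the
# reference implementation this resembles, the MLP output alone produces the
# score and the linear part defined above goes unused):
def forward(self, x):
    embed_x = self.embedding(x)
    cross_term = self.pn(embed_x)
    x = torch.cat([embed_x.view(-1, self.embed_output_dim), cross_term], dim=1)
    return torch.sigmoid(self.mlp(x).squeeze(1))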
# FM variant with observed per-item and per-user feature coefficients; at least
# one of the observed and embedding components must be enabled.
def __init__(self, field_dims, obsItem_dims, obsUser_dims, embed_dim, obs, embed):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    # Coefficients for the observed (non-latent) item and user features.
    self.obsItem_coeff = FeaturesEmbedding(field_dims[0:1], obsItem_dims)
    self.obsUser_coeff = FeaturesEmbedding(field_dims[1:2], obsUser_dims)
    self.linear = FeaturesLinear(field_dims, bias=False)
    self.fm = FactorizationMachine(reduce_sum=True)
    self.obs = obs
    self.embed = embed
    assert obs or embed, "One of obs or embed must be true"
# xDeepFM: a compressed interaction network (CIN) provides explicit high-order
# interactions alongside the linear part and an MLP.
def __init__(self, field_dims, embed_dim, mlp_dims, dropout, cross_layer_sizes,
             split_half=True):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.cin = CompressedInteractionNetwork(len(field_dims), cross_layer_sizes, split_half)
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
    self.linear = FeaturesLinear(field_dims)
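# A forward sketch for the xDeepFM modules above (an assumption):
def forward(self, x):
    embed_x = self.embedding(x)
    x = self.linear(x) + self.cin(embed_x) \
        + self.mlp(embed_x.view(-1, self.embed_output_dim))
    return torch.sigmoid(x.squeeze(1))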
# Higher-order FM: ANOVA kernels add third- and higher-order interaction terms;
# the embedding packs one embed_dim-wide chunk per interaction order above one.
def __init__(self, field_dims, order, embed_dim):
    super().__init__()
    if order < 1:
        raise ValueError(f'invalid order: {order}')
    self.order = order
    self.embed_dim = embed_dim
    self.linear = FeaturesLinear(field_dims)
    if order >= 2:
        self.embedding = FeaturesEmbedding(field_dims, embed_dim * (order - 1))
        self.fm = FactorizationMachine(reduce_sum=True)
    if order >= 3:
        self.kernels = torch.nn.ModuleList([
            AnovaKernel(order=i, reduce_sum=True) for i in range(3, order + 1)
        ])
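# A forward sketch for the higher-order FM above (an assumption): chunk 0 of the
# packed embedding feeds the FM, and chunk i feeds the ANOVA kernel of order i + 2.
def forward(self, x):
    y = self.linear(x).squeeze(1)
    if self.order >= 2:
        x = self.embedding(x)
        y = y + self.fm(x[:, :, :self.embed_dim]).squeeze(1)
        for i in range(self.order - 2):
            chunk = x[:, :, (i + 1) * self.embed_dim:(i + 2) * self.embed_dim]
            y = y + self.kernels[i](chunk).squeeze(1)
    return torch.sigmoid(y)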
# DeepFM variant whose MLP is trained with direct feedback alignment (DFA).
def __init__(self, field_dims, embed_dim, mlp_dims, dropout, training_method='dfa'):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.fm = FactorizationMachine(reduce_sum=True)
    # Trained through FM. OK: no weights in FM.
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = DFAMultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout,
                                       training_method=training_method)
# AutoInt-style variant with an extra side input: the MLP takes the flattened
# embeddings plus 399 additional features (presumably a concatenated side
# feature vector; the exact source is not visible in this fragment).
def __init__(self, field_dims, embed_dim, num_heads, num_layers, mlp_dims, dropouts):
    super().__init__()
    self.embed_dim = embed_dim
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    # self.res = torch.nn.Linear(self.embed_output_dim, self.embed_output_dim)
    self.mlp = MultiLayerPerceptron(self.embed_output_dim + 399, mlp_dims, dropouts[1])
    self.self_attns = torch.nn.ModuleList([
        torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=dropouts[0])
        for _ in range(num_layers)
    ])
    self.attn_fc = torch.nn.Linear(self.embed_output_dim, 1)
# AutoInt variant wired for direct feedback alignment (DFA) training: DFALayer
# hooks are inserted after each trainable stage and registered with a DFA object.
def __init__(self, field_dims, embed_dim, atten_embed_dim, num_heads, num_layers,
             mlp_dims, dropouts, has_residual=True, training_method='dfa'):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.dfa_embedding = DFALayer()
    self.atten_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
    self.dfa_atten_embedding = DFALayer()
    self.embed_output_dim = len(field_dims) * embed_dim
    self.atten_output_dim = len(field_dims) * atten_embed_dim
    self.has_residual = has_residual
    self.mlp = DFAMultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropouts[1],
                                       dfa_output=False)
    self.self_attns = torch.nn.ModuleList([
        torch.nn.MultiheadAttention(atten_embed_dim, num_heads, dropout=dropouts[0])
        for _ in range(num_layers)
    ])
    self.dfa_self_attns = [DFALayer() for _ in range(num_layers - 1)]
    self.attn_fc = torch.nn.Linear(self.atten_output_dim, 1)
    if self.has_residual:
        self.V_res_embedding = torch.nn.Linear(embed_dim, atten_embed_dim)
    self.dfa_cross = DFALayer()
    self.dfa = DFA(
        dfa_layers=[self.dfa_atten_embedding, *self.dfa_self_attns, self.dfa_cross,
                    *self.mlp.dfa_layers, self.dfa_embedding],
        feedback_points_handling=FeedbackPointsHandling.LAST,
        no_training=training_method != 'dfa',
    )
class FM(torch.nn.Module):
    """Factorization Machines"""

    def __init__(self, opt):
        super(FM, self).__init__()
        self.use_cuda = opt.get('use_cuda')
        self.latent_dim = opt['latent_dim']
        self.field_dims = opt['field_dims']
        self.feature_num = sum(self.field_dims)
        self.embedding = PEPEmbedding(opt)
        self.linear = FeaturesLinear(self.field_dims)  # linear part
        self.fm = FactorizationMachine(reduce_sum=True)
        print("BackBone Embedding Parameters: ", self.feature_num * self.latent_dim)

    def forward(self, x):
        linear_score = self.linear(x)
        xv = self.embedding(x)
        fm_score = self.fm(xv)
        score = linear_score + fm_score
        return score.squeeze(1)

    def l2_penalty(self, x, lamb):
        xv = self.embedding(x)
        xv_penalty = xv.pow(2) * lamb
        return xv_penalty.sum()

    def calc_sparsity(self):
        base = self.feature_num * self.latent_dim
        non_zero_values = torch.nonzero(self.embedding.sparse_v).size(0)
        percentage = 1 - (non_zero_values / base)
        return percentage, non_zero_values

    def get_threshold(self):
        return self.embedding.g(self.embedding.s)

    def get_embedding(self):
        return self.embedding.sparse_v.detach().cpu().numpy()
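# A minimal usage sketch for the PEP-pruned FM above. The opt keys mirror the
# __init__ accesses; the concrete values are illustrative assumptions, and
# PEPEmbedding may require additional opt keys not visible in this fragment.
opt = {'use_cuda': False, 'latent_dim': 16, 'field_dims': [10, 20, 30]}
model = FM(opt)
scores = model(torch.zeros(4, len(opt['field_dims']), dtype=torch.long))
sparsity, nnz = model.calc_sparsity()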
class LR(torch.nn.Module):
    def __init__(self, opt):
        super(LR, self).__init__()
        self.use_cuda = opt.get('use_cuda')
        self.field_dims = opt['field_dims']
        self.linear = FeaturesLinear(self.field_dims)  # linear part

    def forward(self, x):
        """Compute score."""
        score = self.linear(x)
        return score.squeeze(1)

    def l2_penalty(self, x, lamb):
        return 0

    def calc_sparsity(self):
        return 0, 0

    def get_threshold(self):
        return 0

    def get_embedding(self):
        return np.zeros(1)
# AFN variant wired for direct feedback alignment (DFA) training.
def __init__(self, field_dims, embed_dim, LNN_dim, mlp_dims, dropouts,
             training_method='dfa'):
    super().__init__()
    self.num_fields = len(field_dims)
    self.linear = FeaturesLinear(field_dims)  # Linear
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)  # Embedding
    self.dfa_embed = DFALayer()
    self.LNN_dim = LNN_dim
    self.LNN_output_dim = self.LNN_dim * embed_dim
    self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
    self.dfa_lnn = DFALayer()
    self.mlp = DFAMultiLayerPerceptron(self.LNN_output_dim, mlp_dims, dropouts[0],
                                       dfa_output=False)
    self.dfa = DFA(dfa_layers=[self.dfa_lnn, *self.mlp.dfa_layers],
                   feedback_points_handling=FeedbackPointsHandling.LAST,
                   no_training=training_method != 'dfa')
# Field-aware factorization machine: each feature keeps one embedding per field.
def __init__(self, field_dims, embed_dim):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)
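# A forward sketch for the FFM modules above (an assumption; the field-aware
# interaction tensor is summed over its pair and embedding dimensions):
def forward(self, x):
    ffm_term = torch.sum(torch.sum(self.ffm(x), dim=1), dim=1, keepdim=True)
    x = self.linear(x) + ffm_term
    return torch.sigmoid(x.squeeze(1))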
def __init__(self, field_dims, t, lam):
    super().__init__()
    self.linear = FeaturesLinear(field_dims, t, lam)