def __init__(self, field_dims, embed_dim):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims)
    self.fm = FactorizationMachine(reduce_sum=True)
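# Added sketch (not from the original source): every constructor in this file delegates
# the pairwise-interaction term to FactorizationMachine. A minimal, self-contained
# version of that layer, assuming the standard FM identity
# 0.5 * ((sum_i v_i)^2 - sum_i v_i^2) over the field axis, might look like this
# (the class name is hypothetical):
import torch


class FactorizationMachineSketch(torch.nn.Module):
    """Pairwise-interaction pooling over per-field embeddings of shape (batch, fields, dim)."""

    def __init__(self, reduce_sum=True):
        super().__init__()
        self.reduce_sum = reduce_sum

    def forward(self, x):
        square_of_sum = torch.sum(x, dim=1) ** 2   # (batch, dim)
        sum_of_square = torch.sum(x ** 2, dim=1)   # (batch, dim)
        ix = square_of_sum - sum_of_square
        if self.reduce_sum:
            ix = torch.sum(ix, dim=1, keepdim=True)  # (batch, 1) scalar interaction score
        return 0.5 * ix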
def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.fm = FactorizationMachine(reduce_sum=True)
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
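# Added sketch (not from the original source): a plausible forward pass for this
# DeepFM-style constructor, following the standard formulation
# score = linear(x) + FM(embeddings) + MLP(flattened embeddings).
# The final sigmoid and the exact reshaping are assumptions.
def forward(self, x):
    embed_x = self.embedding(x)  # (batch, num_fields, embed_dim)
    score = (self.linear(x) + self.fm(embed_x)
             + self.mlp(embed_x.view(-1, self.embed_output_dim)))
    return torch.sigmoid(score.squeeze(1))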
def __init__(self, field_dims, embed_dim, mlp_dims, dropouts):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.linear = FeaturesLinear(field_dims)
    self.fm = torch.nn.Sequential(
        FactorizationMachine(reduce_sum=False),
        torch.nn.BatchNorm1d(embed_dim),
        torch.nn.Dropout(dropouts[0]),
    )
    self.mlp = MultiLayerPerceptron(embed_dim, mlp_dims, dropouts[1])
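# Added sketch (not from the original source): with reduce_sum=False the FM block above
# returns the embed_dim-sized bi-interaction vector (then batch-normed and dropped out),
# which feeds the MLP, as in Neural Factorization Machines. A plausible forward pass,
# with the sigmoid as an assumption:
def forward(self, x):
    cross_term = self.fm(self.embedding(x))        # (batch, embed_dim) bi-interaction vector
    score = self.linear(x) + self.mlp(cross_term)
    return torch.sigmoid(score.squeeze(1))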
def __init__(self, field_dims, obsItem_dims, obsUser_dims, embed_dim, obs, embed):
    super().__init__()
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    # Per-item and per-user coefficients for the observed-feature terms.
    self.obsItem_coeff = FeaturesEmbedding(field_dims[0:1], obsItem_dims)
    self.obsUser_coeff = FeaturesEmbedding(field_dims[1:2], obsUser_dims)
    self.linear = FeaturesLinear(field_dims, bias=False)
    self.fm = FactorizationMachine(reduce_sum=True)
    self.obs = obs
    self.embed = embed
    assert obs or embed, "One of obs or embed must be true"
def __init__(self, field_dims, order, embed_dim):
    super().__init__()
    if order < 1:
        raise ValueError(f'invalid order: {order}')
    self.order = order
    self.embed_dim = embed_dim
    self.linear = FeaturesLinear(field_dims)
    if order >= 2:
        self.embedding = FeaturesEmbedding(field_dims, embed_dim * (order - 1))
        self.fm = FactorizationMachine(reduce_sum=True)
    if order >= 3:
        self.kernels = torch.nn.ModuleList([
            AnovaKernel(order=i, reduce_sum=True) for i in range(3, order + 1)
        ])
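# Added sketch (not from the original source): the embedding above packs (order - 1)
# chunks of size embed_dim per field. A plausible forward pass feeds the first chunk to
# the second-order FM and each remaining chunk to the matching AnovaKernel; the chunk
# assignment and the final sigmoid are assumptions.
def forward(self, x):
    y = self.linear(x).squeeze(1)
    if self.order >= 2:
        emb = self.embedding(x)  # (batch, num_fields, embed_dim * (order - 1))
        y = y + self.fm(emb[:, :, :self.embed_dim]).squeeze(1)
        for i, kernel in enumerate(getattr(self, 'kernels', [])):
            chunk = emb[:, :, (i + 1) * self.embed_dim:(i + 2) * self.embed_dim]
            y = y + kernel(chunk).squeeze(1)
    return torch.sigmoid(y)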
def __init__(self, field_dims, embed_dim, mlp_dims, dropout, training_method='dfa'):
    super().__init__()
    self.linear = FeaturesLinear(field_dims)
    self.fm = FactorizationMachine(reduce_sum=True)
    # Trained through FM. OK: no weights in FM.
    self.embedding = FeaturesEmbedding(field_dims, embed_dim)
    self.embed_output_dim = len(field_dims) * embed_dim
    self.mlp = DFAMultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout,
                                       training_method=training_method)
class FM(torch.nn.Module):
    """Factorization Machines with a PEP-pruned embedding backbone."""

    def __init__(self, opt):
        super(FM, self).__init__()
        self.use_cuda = opt.get('use_cuda')
        self.latent_dim = opt['latent_dim']
        self.field_dims = opt['field_dims']
        self.feature_num = sum(self.field_dims)
        self.embedding = PEPEmbedding(opt)
        self.linear = FeaturesLinear(self.field_dims)  # linear part
        self.fm = FactorizationMachine(reduce_sum=True)
        print("BackBone Embedding Parameters: ", self.feature_num * self.latent_dim)

    def forward(self, x):
        linear_score = self.linear.forward(x)
        xv = self.embedding(x)
        fm_score = self.fm.forward(xv)
        score = linear_score + fm_score
        return score.squeeze(1)

    def l2_penalty(self, x, lamb):
        xv = self.embedding(x)
        xv_sq = xv.pow(2)
        xv_penalty = xv_sq * lamb
        xv_penalty = xv_penalty.sum()
        return xv_penalty

    def calc_sparsity(self):
        base = self.feature_num * self.latent_dim
        non_zero_values = torch.nonzero(self.embedding.sparse_v).size(0)
        percentage = 1 - (non_zero_values / base)
        return percentage, non_zero_values

    def get_threshold(self):
        return self.embedding.g(self.embedding.s)

    def get_embedding(self):
        return self.embedding.sparse_v.detach().cpu().numpy()
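# Usage sketch (added, not from the original source). Only the opt keys read in
# __init__ above are shown; PEPEmbedding will typically expect additional,
# implementation-specific keys (e.g. its pruning-threshold settings), which are
# omitted here, so this is illustrative rather than runnable as-is.
# opt = {'use_cuda': False, 'latent_dim': 16, 'field_dims': [1000, 500, 20]}
# model = FM(opt)
# x = torch.randint(0, 20, (32, len(opt['field_dims'])))  # one index per field
# y_hat = model(x)                       # (32,) raw FM scores
# reg = model.l2_penalty(x, lamb=1e-4)   # L2 penalty on the looked-up embeddings
# sparsity, nnz = model.calc_sparsity()  # fraction of pruned weights, non-zero count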