Code Example #1
    def __init__(self, config, dataset):
        super(MacridVAE, self).__init__(config, dataset)

        self.layers = config['encoder_hidden_size']
        self.embedding_size = config['embedding_size']
        self.drop_out = config['drop_out']
        self.kfac = config['kfac']
        self.tau = config['tau']
        self.nogb = config['nogb']
        self.anneal_cap = config['anneal_cap']
        self.total_anneal_steps = config['total_anneal_steps']
        self.regs = config['reg_weights']
        self.std = config['std']

        self.update = 0

        self.history_item_id, self.history_item_value, _ = dataset.history_item_matrix()
        self.history_item_id = self.history_item_id.to(self.device)
        self.history_item_value = self.history_item_value.to(self.device)
        self.encode_layer_dims = [self.n_items] + self.layers + [self.embedding_size * 2]

        self.encoder = self.mlp_layers(self.encode_layer_dims)

        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size)
        self.k_embedding = nn.Embedding(self.kfac, self.embedding_size)

        self.l2_loss = EmbLoss()
        # parameters initialization
        self.apply(xavier_normal_initialization)
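The constructor calls self.mlp_layers(self.encode_layer_dims), a helper defined elsewhere in the class. A minimal sketch of what such a helper typically builds from a dims list like [n_items, *hidden_sizes, embedding_size * 2] (RecBole's actual helper may differ in activation and dropout):

import torch.nn as nn

def mlp_layers(layer_dims):
    # stack Linear blocks over consecutive dims, with Tanh between
    # hidden layers and no activation after the output layer
    modules = []
    for i, (d_in, d_out) in enumerate(zip(layer_dims[:-1], layer_dims[1:])):
        modules.append(nn.Linear(d_in, d_out))
        if i != len(layer_dims) - 2:
            modules.append(nn.Tanh())
    return nn.Sequential(*modules)

The output width is embedding_size * 2 because the VAE encoder produces both the mean and the log-variance of the latent code.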
Code Example #2
    def __init__(self, config, dataset):
        super(LightGCN, self).__init__(config, dataset)

        # load dataset info
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(
            np.float32)

        # load parameters info
        self.latent_dim = config['embedding_size']  # int: the embedding size of LightGCN
        self.n_layers = config['n_layers']  # int: the number of layers in LightGCN
        self.reg_weight = config['reg_weight']  # float: the weight of the L2 regularization

        # define layers and loss
        self.user_embedding = torch.nn.Embedding(num_embeddings=self.n_users,
                                                 embedding_dim=self.latent_dim)
        self.item_embedding = torch.nn.Embedding(num_embeddings=self.n_items,
                                                 embedding_dim=self.latent_dim)
        self.mf_loss = BPRLoss()
        self.reg_loss = EmbLoss()

        # storage variables for full sort evaluation acceleration
        self.restore_user_e = None
        self.restore_item_e = None

        # generate intermediate data
        self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device)

        # parameters initialization
        self.apply(xavier_uniform_initialization)
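These constructors are not called by hand; RecBole's quick-start entry point builds config and dataset and instantiates the model internally. A minimal sketch, assuming a standard RecBole install ('ml-100k' is just an example dataset name):

from recbole.quick_start import run_recbole

# resolves the LightGCN class, builds Config and Dataset, then trains and evaluates
run_recbole(model='LightGCN', dataset='ml-100k')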
Code Example #3
File: ngcf.py  Project: zhaoyone/RecBole
    def __init__(self, config, dataset):
        super(NGCF, self).__init__(config, dataset)

        # load dataset info
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(
            np.float32)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.hidden_size_list = config['hidden_size_list']
        self.hidden_size_list = [self.embedding_size] + self.hidden_size_list
        self.node_dropout = config['node_dropout']
        self.message_dropout = config['message_dropout']
        self.reg_weight = config['reg_weight']

        # define layers and loss
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size)
        self.GNNlayers = torch.nn.ModuleList()
        for idx, (input_size, output_size) in enumerate(
                zip(self.hidden_size_list[:-1], self.hidden_size_list[1:])):
            self.GNNlayers.append(BiGNNLayer(input_size, output_size))
        self.mf_loss = BPRLoss()
        self.reg_loss = EmbLoss()

        # storage variables for full sort evaluation acceleration
        self.restore_user_e = None
        self.restore_item_e = None

        # generate intermediate data
        self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device)
        self.eye_matrix = self.get_eye_mat().to(self.device)

        # parameters initialization
        self.apply(xavier_normal_initialization)
Code Example #4
File: transrec.py  Project: xingkongxiaxia/xx
    def __init__(self, config, dataset):
        super(TransRec, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']

        # load dataset info
        self.n_users = dataset.user_num

        self.user_embedding = nn.Embedding(self.n_users,
                                           self.embedding_size,
                                           padding_idx=0)
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.embedding_size,
                                           padding_idx=0)
        self.bias = nn.Embedding(self.n_items, 1,
                                 padding_idx=0)  # Beta popularity bias
        self.T = nn.Parameter(torch.zeros(self.embedding_size))  # global translation vector (the 'average user' representation)

        self.bpr_loss = BPRLoss()
        self.emb_loss = EmbLoss()
        self.reg_loss = RegLoss()

        # parameters initialization
        self.apply(xavier_normal_initialization)
Code Example #5
File: spectralcf.py  Project: rowedenny/RecBole
    def __init__(self, config, dataset):
        super(SpectralCF, self).__init__(config, dataset)

        # load parameters info
        self.n_layers = config['n_layers']
        self.emb_dim = config['embedding_size']
        self.reg_weight = config['reg_weight']

        # generate intermediate data
        # "A_hat = I + L" is equivalent to "A_hat = U U^T + U \Lambda U^T",
        # since I = U U^T and L = U \Lambda U^T under the eigendecomposition
        # of the graph Laplacian
        self.interaction_matrix = dataset.inter_matrix(
            form='coo').astype(np.float32)
        I = self.get_eye_mat(self.n_items + self.n_users)
        L = self.get_laplacian_matrix()
        A_hat = I + L
        self.A_hat = A_hat.to(self.device)

        # define layers and loss
        self.user_embedding = torch.nn.Embedding(
            num_embeddings=self.n_users, embedding_dim=self.emb_dim)
        self.item_embedding = torch.nn.Embedding(
            num_embeddings=self.n_items, embedding_dim=self.emb_dim)
        self.filters = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.normal(mean=0.01, std=0.02, size=(self.emb_dim, self.emb_dim)).to(self.device),
                                requires_grad=True)
             for _ in range(self.n_layers)])

        self.sigmoid = torch.nn.Sigmoid()
        self.mf_loss = BPRLoss()
        self.reg_loss = EmbLoss()
        self.restore_user_e = None
        self.restore_item_e = None

        # parameters initialization
        self.apply(xavier_uniform_initialization)
Code Example #6
    def __init__(self, config, dataset):
        super(TransRecF, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']

        # load dataset info
        self.n_users = dataset.user_num

        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size, padding_idx=0)
        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size, padding_idx=0)
        self.bias = nn.Embedding(self.n_items, 1, padding_idx=0)  # Beta popularity bias
        self.T = nn.Parameter(torch.zeros(self.embedding_size))  # global translation vector (the 'average user' representation)
        self.selected_features = config['selected_features']
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.pooling_mode = config['pooling_mode']
        self.dropout = nn.Dropout(self.hidden_dropout_prob)
        self.layer_norm_eps = config['layer_norm_eps']
        self.device = config['device']
        self.num_feature_field = len(config['selected_features'])
        self.bpr_loss = BPRLoss()
        self.emb_loss = EmbLoss()
        self.reg_loss = RegLoss()
        self.feature_embed_layer = FeatureSeqEmbLayer(
            dataset, self.embedding_size, self.selected_features, self.pooling_mode, self.device
        )
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=self.layer_norm_eps)
        self.concat_layer = nn.Linear(self.embedding_size * (1 + self.num_feature_field), self.embedding_size)
        # parameters initialization
        self.apply(xavier_normal_initialization)
Code Example #7
    def __init__(self, config, dataset):
        super(DGCF, self).__init__(config, dataset)

        # load dataset info
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(
            np.float32)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.n_factors = config['n_factors']
        self.n_iterations = config['n_iterations']
        self.n_layers = config['n_layers']
        self.reg_weight = config['reg_weight']
        self.cor_weight = config['cor_weight']
        n_batch = dataset.dataset.inter_num // self.batch_size + 1
        self.cor_batch_size = int(
            max(self.n_users / n_batch, self.n_items / n_batch))
        # ensure the embedding can be evenly split into <n_factors> intent factors
        assert self.embedding_size % self.n_factors == 0

        # generate intermediate data
        row = self.interaction_matrix.row.tolist()
        col = self.interaction_matrix.col.tolist()
        col = [item_index + self.n_users for item_index in col]
        all_h_list = row + col  # edge heads: user nodes, then shifted item nodes
        all_t_list = col + row  # edge tails: the reverse, making the graph symmetric
        num_edge = len(all_h_list)
        edge_ids = range(num_edge)
        self.all_h_list = torch.LongTensor(all_h_list).to(self.device)
        self.all_t_list = torch.LongTensor(all_t_list).to(self.device)
        self.edge2head = torch.LongTensor([all_h_list,
                                           edge_ids]).to(self.device)
        self.head2edge = torch.LongTensor([edge_ids,
                                           all_h_list]).to(self.device)
        self.tail2edge = torch.LongTensor([edge_ids,
                                           all_t_list]).to(self.device)
        val_one = torch.ones_like(self.all_h_list).float().to(self.device)
        num_node = self.n_users + self.n_items
        self.edge2head_mat = self._build_sparse_tensor(self.edge2head, val_one,
                                                       (num_node, num_edge))
        self.head2edge_mat = self._build_sparse_tensor(self.head2edge, val_one,
                                                       (num_edge, num_node))
        self.tail2edge_mat = self._build_sparse_tensor(self.tail2edge, val_one,
                                                       (num_edge, num_node))
        self.num_edge = num_edge
        self.num_node = num_node

        # define layers and loss
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size)
        self.softmax = torch.nn.Softmax(dim=1)
        self.mf_loss = BPRLoss()
        self.reg_loss = EmbLoss()
        self.restore_user_e = None
        self.restore_item_e = None

        # parameters initialization
        self.apply(xavier_normal_initialization)
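_build_sparse_tensor is a helper defined elsewhere in the class. A minimal sketch of what it presumably wraps, assuming the (indices, values, size) arguments built above:

import torch

def build_sparse_tensor(indices, values, size):
    # indices: (2, nnz) LongTensor such as edge2head above;
    # values: (nnz,) FloatTensor; size: (n_rows, n_cols)
    return torch.sparse_coo_tensor(indices, values, torch.Size(size))

With edge2head of shape (2, num_edge) and val_one, this yields a (num_node, num_edge) sparse incidence matrix used during message passing.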
Code Example #8
    def __init__(self, config, dataset):
        super(RippleNet, self).__init__(config, dataset)

        # load dataset info
        self.LABEL = config['LABEL_FIELD']

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.kg_weight = config['kg_weight']
        self.reg_weight = config['reg_weight']
        self.n_hop = config['n_hop']
        self.n_memory = config['n_memory']
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(
            np.float32)
        head_entities = dataset.head_entities.tolist()
        tail_entities = dataset.tail_entities.tolist()
        relations = dataset.relations.tolist()
        kg = {}
        for i in range(len(head_entities)):
            head_ent = head_entities[i]
            tail_ent = tail_entities[i]
            relation = relations[i]
            kg.setdefault(head_ent, [])
            kg[head_ent].append((tail_ent, relation))
        self.kg = kg
        users = self.interaction_matrix.row.tolist()
        items = self.interaction_matrix.col.tolist()
        user_dict = {}
        for i in range(len(users)):
            user = users[i]
            item = items[i]
            user_dict.setdefault(user, [])
            user_dict[user].append(item)
        self.user_dict = user_dict
        self.ripple_set = self._build_ripple_set()

        # define layers and loss
        self.entity_embedding = nn.Embedding(self.n_entities,
                                             self.embedding_size)
        self.relation_embedding = nn.Embedding(
            self.n_relations, self.embedding_size * self.embedding_size)
        self.transform_matrix = nn.Linear(self.embedding_size,
                                          self.embedding_size,
                                          bias=False)
        self.softmax = torch.nn.Softmax(dim=1)
        self.sigmoid = torch.nn.Sigmoid()
        self.rec_loss = BPRLoss()
        self.l2_loss = EmbLoss()
        self.loss = nn.BCEWithLogitsLoss()

        # parameters initialization
        self.apply(xavier_normal_initialization)
        self.other_parameter_name = ['ripple_set']
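The two accumulation loops above index the parallel lists by position. An equivalent, more idiomatic sketch using zip and defaultdict (toy triples stand in for the dataset fields):

from collections import defaultdict

head_entities = [0, 0, 1]  # toy data; the real lists come from the dataset
tail_entities = [2, 3, 4]
relations = [5, 5, 6]

kg = defaultdict(list)
for head, tail, rel in zip(head_entities, tail_entities, relations):
    kg[head].append((tail, rel))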
Code Example #9
File: kgnnls.py  Project: xingkongxiaxia/RecBole
    def __init__(self, config, dataset):
        super(KGNNLS, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.neighbor_sample_size = config['neighbor_sample_size']
        self.aggregator_class = config['aggregator']  # which aggregator to use
        # number of iterations when computing entity representation
        self.n_iter = config['n_iter']
        self.reg_weight = config['reg_weight']  # weight of l2 regularization
        # weight of label Smoothness regularization
        self.ls_weight = config['ls_weight']

        # define embedding
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
        self.entity_embedding = nn.Embedding(self.n_entities,
                                             self.embedding_size)
        self.relation_embedding = nn.Embedding(self.n_relations + 1,
                                               self.embedding_size)

        # sample neighbors and construct interaction table
        kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
        adj_entity, adj_relation = self.construct_adj(kg_graph)
        self.adj_entity, self.adj_relation = adj_entity.to(
            self.device), adj_relation.to(self.device)

        inter_feat = dataset.dataset.inter_feat.values
        pos_users = torch.from_numpy(inter_feat[:, 0])
        pos_items = torch.from_numpy(inter_feat[:, 1])
        pos_label = torch.ones(pos_items.shape)
        pos_interaction_table, self.offset = self.get_interaction_table(
            pos_users, pos_items, pos_label)
        self.interaction_table = self.sample_neg_interaction(
            pos_interaction_table, self.offset)

        # define function
        self.softmax = nn.Softmax(dim=-1)
        self.linear_layers = torch.nn.ModuleList()
        for i in range(self.n_iter):
            # the 'concat' aggregator concatenates self and neighbor vectors,
            # so its linear layer takes a doubled input dimension
            input_dim = (self.embedding_size * 2
                         if self.aggregator_class == 'concat'
                         else self.embedding_size)
            self.linear_layers.append(nn.Linear(input_dim, self.embedding_size))
        self.ReLU = nn.ReLU()
        self.Tanh = nn.Tanh()

        self.bce_loss = nn.BCEWithLogitsLoss()
        self.l2_loss = EmbLoss()

        # parameters initialization
        self.apply(xavier_normal_initialization)
Code Example #10
File: kgat.py  Project: ShanleiMu/RecBole-1
    def __init__(self, config, dataset):
        super(KGAT, self).__init__(config, dataset)

        # load dataset info
        self.ckg = dataset.ckg_graph(form='dgl', value_field='relation_id')
        self.all_hs = torch.LongTensor(
            dataset.ckg_graph(form='coo',
                              value_field='relation_id').row).to(self.device)
        self.all_ts = torch.LongTensor(
            dataset.ckg_graph(form='coo',
                              value_field='relation_id').col).to(self.device)
        self.all_rs = torch.LongTensor(
            dataset.ckg_graph(form='coo',
                              value_field='relation_id').data).to(self.device)
        self.matrix_size = torch.Size(
            [self.n_users + self.n_entities, self.n_users + self.n_entities])

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.kg_embedding_size = config['kg_embedding_size']
        self.layers = [self.embedding_size] + config['layers']
        self.aggregator_type = config['aggregator_type']
        self.mess_dropout = config['mess_dropout']
        self.reg_weight = config['reg_weight']

        # generate intermediate data
        # initialize the attention matrix from the structure of the CKG
        self.A_in = self.init_graph()

        # define layers and loss
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
        self.entity_embedding = nn.Embedding(self.n_entities,
                                             self.embedding_size)
        self.relation_embedding = nn.Embedding(self.n_relations,
                                               self.kg_embedding_size)
        self.trans_w = nn.Embedding(
            self.n_relations, self.embedding_size * self.kg_embedding_size)
        self.aggregator_layers = nn.ModuleList()
        for idx, (input_dim, output_dim) in enumerate(
                zip(self.layers[:-1], self.layers[1:])):
            self.aggregator_layers.append(
                Aggregator(input_dim, output_dim, self.mess_dropout,
                           self.aggregator_type))
        self.tanh = nn.Tanh()
        self.mf_loss = BPRLoss()
        self.reg_loss = EmbLoss()
        self.restore_user_e = None
        self.restore_entity_e = None

        # parameters initialization
        self.apply(xavier_normal_initialization)
        self.other_parameter_name = ['restore_user_e', 'restore_entity_e']
Code Example #11
    def __init__(self, config, dataset):
        super(NCL, self).__init__(config, dataset)

        # load dataset info
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(
            np.float32)

        # load parameters info
        self.latent_dim = config['embedding_size']  # int: the embedding size of the base model
        self.n_layers = config['n_layers']  # int: the number of layers of the base model
        self.reg_weight = config['reg_weight']  # float: the weight of the L2 regularization

        self.ssl_temp = config['ssl_temp']
        self.ssl_reg = config['ssl_reg']
        self.hyper_layers = config['hyper_layers']

        self.alpha = config['alpha']

        self.proto_reg = config['proto_reg']
        self.k = config['num_clusters']

        # define layers and loss
        self.user_embedding = torch.nn.Embedding(num_embeddings=self.n_users,
                                                 embedding_dim=self.latent_dim)
        self.item_embedding = torch.nn.Embedding(num_embeddings=self.n_items,
                                                 embedding_dim=self.latent_dim)

        self.mf_loss = BPRLoss()
        self.reg_loss = EmbLoss()

        # storage variables for full sort evaluation acceleration
        self.restore_user_e = None
        self.restore_item_e = None

        self.norm_adj_mat = self.get_norm_adj_mat().to(self.device)

        # parameters initialization
        self.apply(xavier_uniform_initialization)
        self.other_parameter_name = ['restore_user_e', 'restore_item_e']

        self.user_centroids = None
        self.user_2cluster = None
        self.item_centroids = None
        self.item_2cluster = None
Code Example #12
File: gcsan.py  Project: hyp1231/RecBole
    def __init__(self, config, dataset):
        super(GCSAN, self).__init__(config, dataset)

        # load parameters info
        self.n_layers = config['n_layers']
        self.n_heads = config['n_heads']
        self.hidden_size = config['hidden_size']  # same as embedding_size
        self.inner_size = config['inner_size']  # the dimensionality of the feed-forward layer
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.attn_dropout_prob = config['attn_dropout_prob']
        self.hidden_act = config['hidden_act']
        self.layer_norm_eps = config['layer_norm_eps']

        self.step = config['step']
        self.device = config['device']
        self.weight = config['weight']
        self.reg_weight = config['reg_weight']
        self.loss_type = config['loss_type']
        self.initializer_range = config['initializer_range']

        # define layers and loss
        self.item_embedding = nn.Embedding(self.n_items,
                                           self.hidden_size,
                                           padding_idx=0)
        self.gnn = GNN(self.hidden_size, self.step)
        self.self_attention = TransformerEncoder(
            n_layers=self.n_layers,
            n_heads=self.n_heads,
            hidden_size=self.hidden_size,
            inner_size=self.inner_size,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attn_dropout_prob=self.attn_dropout_prob,
            hidden_act=self.hidden_act,
            layer_norm_eps=self.layer_norm_eps)
        self.reg_loss = EmbLoss()
        if self.loss_type == 'BPR':
            self.loss_fct = BPRLoss()
        elif self.loss_type == 'CE':
            self.loss_fct = nn.CrossEntropyLoss()
        else:
            raise NotImplementedError(
                "Make sure 'loss_type' in ['BPR', 'CE']!")

        # parameters initialization
        self.apply(self._init_weights)
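The BPR/CE branch above is driven purely by configuration. A hedged sketch of selecting the loss through config_dict (the dataset name is illustrative):

from recbole.quick_start import run_recbole

# switch GCSAN to cross-entropy loss; other hyperparameters keep their defaults
run_recbole(model='GCSAN', dataset='ml-100k', config_dict={'loss_type': 'CE'})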
Code Example #13
    def __init__(self, config, dataset):
        super(KGCN, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        # number of iterations when computing entity representation
        self.n_iter = config['n_iter']
        self.aggregator_class = config['aggregator']  # which aggregator to use
        self.reg_weight = config['reg_weight']  # weight of l2 regularization
        self.neighbor_sample_size = config['neighbor_sample_size']

        # define embedding
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
        self.entity_embedding = nn.Embedding(self.n_entities,
                                             self.embedding_size)
        self.relation_embedding = nn.Embedding(self.n_relations + 1,
                                               self.embedding_size)

        # sample neighbors
        kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
        adj_entity, adj_relation = self.construct_adj(kg_graph)
        self.adj_entity, self.adj_relation = adj_entity.to(
            self.device), adj_relation.to(self.device)

        # define function
        self.softmax = nn.Softmax(dim=-1)
        self.linear_layers = torch.nn.ModuleList()
        for i in range(self.n_iter):
            # the 'concat' aggregator concatenates self and neighbor vectors,
            # so its linear layer takes a doubled input dimension
            input_dim = (self.embedding_size * 2
                         if self.aggregator_class == 'concat'
                         else self.embedding_size)
            self.linear_layers.append(nn.Linear(input_dim, self.embedding_size))
        self.ReLU = nn.ReLU()
        self.Tanh = nn.Tanh()

        self.bce_loss = nn.BCEWithLogitsLoss()
        self.l2_loss = EmbLoss()

        # parameters initialization
        self.apply(xavier_normal_initialization)
        self.other_parameter_name = ['adj_entity', 'adj_relation']
Code Example #14
File: sgl.py  Project: ShanleiMu/RecBole-1
    def __init__(self, config, dataset):
        super(SGL, self).__init__(config, dataset)
        self._user = dataset.inter_feat[dataset.uid_field]
        self._item = dataset.inter_feat[dataset.iid_field]
        self.embed_dim = config["embedding_size"]
        self.n_layers = int(config["n_layers"])
        self.type = config["type"]
        self.drop_ratio = config["drop_ratio"]
        self.ssl_tau = config["ssl_tau"]
        self.reg_weight = config["reg_weight"]
        self.ssl_weight = config["ssl_weight"]
        self.user_embedding = torch.nn.Embedding(self.n_users, self.embed_dim)
        self.item_embedding = torch.nn.Embedding(self.n_items, self.embed_dim)
        self.reg_loss = EmbLoss()
        self.train_graph = self.csr2tensor(
            self.create_adjust_matrix(is_sub=False))
        self.restore_user_e = None
        self.restore_item_e = None
        self.apply(xavier_uniform_initialization)
        self.other_parameter_name = ['restore_user_e', 'restore_item_e']
Code Example #15
    def __init__(self, config, dataset):
        super(CKE, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config['embedding_size']
        self.kg_embedding_size = config['kg_embedding_size']
        self.reg_weights = config['reg_weights']

        # define layers and loss
        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size)
        self.entity_embedding = nn.Embedding(self.n_entities, self.embedding_size)
        self.relation_embedding = nn.Embedding(self.n_relations, self.kg_embedding_size)
        self.trans_w = nn.Embedding(self.n_relations, self.embedding_size * self.kg_embedding_size)
        self.rec_loss = BPRLoss()
        self.kg_loss = BPRLoss()
        self.reg_loss = EmbLoss()

        # parameters initialization
        self.apply(xavier_normal_initialization)
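trans_w stores one flattened embedding_size x kg_embedding_size projection matrix per relation. A sketch of how such a matrix is typically applied (a TransR-style projection; CKE's actual forward pass may differ):

import torch

def transr_project(ent_e, w_flat, emb_size, kg_emb_size):
    # ent_e: (batch, emb_size) entity embeddings
    # w_flat: (batch, emb_size * kg_emb_size) rows looked up from trans_w
    w = w_flat.view(-1, emb_size, kg_emb_size)
    # (batch, 1, emb_size) @ (batch, emb_size, kg_emb_size) -> (batch, kg_emb_size)
    return torch.bmm(ent_e.unsqueeze(1), w).squeeze(1)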