Example no. 1
0
    def ml_forward(self, users):
        """VAE forward pass (multinomial likelihood) for a batch of users.

        Returns a tuple (scores, kl, l2_norm_sq): item scores per user, the
        per-user KL term, and the accumulated squared L2 norm of every
        encoder/decoder weight matrix.
        """
        user_ids = users.cpu().numpy()
        profiles = self.normalized_data_mat[user_ids, :]
        reps = get_sparse_tensor(profiles, self.device)

        # First encoder layer applied manually: the input is sparse, so use
        # torch.sparse.mm with the layer's weight/bias instead of calling it.
        reps = NGCF.dropout_sp_mat(self, reps)
        first_enc = self.encoder_layers[0]
        reps = torch.sparse.mm(reps, first_enc.weight.t()) + first_enc.bias[None, :]
        l2_norm_sq = torch.norm(first_enc.weight, p=2)[None] ** 2
        for enc_layer in self.encoder_layers[1:]:
            reps = enc_layer(torch.tanh(reps))
            l2_norm_sq = l2_norm_sq + torch.norm(enc_layer.weight, p=2)[None] ** 2

        # Split the encoder output into Gaussian posterior parameters.
        mean = reps[:, :self.mid_size]
        log_var = reps[:, -self.mid_size:]
        std = torch.exp(0.5 * log_var)
        kl = torch.sum(-log_var + torch.exp(log_var) + mean ** 2, dim=1)
        # Reparameterization trick; the noise term is switched off at eval time.
        noise = torch.randn(mean.shape[0], mean.shape[1], device=self.device)
        reps = mean + float(self.training) * noise * std

        for dec_layer in self.decoder_layers[:-1]:
            reps = torch.tanh(dec_layer(reps))
            l2_norm_sq = l2_norm_sq + torch.norm(dec_layer.weight, p=2)[None] ** 2
        last_dec = self.decoder_layers[-1]
        scores = last_dec(reps)
        l2_norm_sq = l2_norm_sq + torch.norm(last_dec.weight, p=2)[None] ** 2
        return scores, kl, l2_norm_sq
Example no. 2
0
    def generate_graph(self, dataset):
        """Return the row-normalized adjacency (with self-loops) as a sparse tensor."""
        adjacency = generate_daj_mat(dataset)
        # Add self-loops before L1 (row) normalization.
        adjacency = adjacency + sp.eye(adjacency.shape[0], format='csr')

        row_normalized = normalize(adjacency, axis=1, norm='l1')
        return get_sparse_tensor(row_normalized, self.device)
Example no. 3
0
    def generate_graph(self, dataset):
        """Return the symmetrically normalized adjacency D^{-1/2} A D^{-1/2}."""
        adjacency = generate_daj_mat(dataset)
        degrees = np.array(np.sum(adjacency, axis=1)).squeeze()
        # Clamp degrees at 1 so isolated nodes don't cause division by zero.
        inv_sqrt_deg = np.power(np.maximum(1., degrees), -0.5)
        d_inv_sqrt = sp.diags(inv_sqrt_deg, format='csr', dtype=np.float32)

        norm_adj = d_inv_sqrt.dot(adjacency).dot(d_inv_sqrt)
        return get_sparse_tensor(norm_adj, self.device)
Example no. 4
0
def main():
    """Plot low-rank reconstruction error against node rankings (figure 5)."""
    init_run(__file__[:-3], 2021)

    device = torch.device('cuda')
    config = get_gowalla_config(device)
    dataset_config, model_config, trainer_config = config[2]
    # Point at the first sub-split of the dataset.
    dataset_config['path'] = dataset_config['path'][:-4] + str(1)

    dataset = get_dataset(dataset_config)
    adj = generate_daj_mat(dataset)
    # User-item interaction sub-block of the full bipartite adjacency.
    part_adj = adj[:dataset.n_users, dataset.n_users:]
    with torch.no_grad():
        u, s, v = torch.svd_lowrank(get_sparse_tensor(part_adj, 'cpu'), 64)

    # Rank users and items under each of the three ranking strategies.
    metrics = ('sort', 'degree', 'page_rank')
    rankings = [graph_rank_nodes(dataset, metric) for metric in metrics]
    ranked_users = tuple(users for users, _ in rankings)
    ranked_items = tuple(items for _, items in rankings)

    pdf = PdfPages('figure_5.pdf')
    fig, ax = plt.subplots(nrows=1, ncols=2,
                           constrained_layout=True, figsize=(11, 4))
    axes = ax.flatten()
    plot_error(part_adj, u.cpu().numpy(),
               ranked_users, axes[0], device, 'users')
    plot_error(part_adj.T, v.cpu().numpy(),
               ranked_items, axes[1], device, 'items')
    pdf.savefig()
    plt.close(fig)
    pdf.close()
Example no. 5
0
    def train_one_epoch(self):
        """Run one training epoch of the multinomial-likelihood VAE.

        Returns the average total loss over the epoch.
        """
        # KL weight is annealed linearly until it reaches self.kl_reg.
        kl_weight = min(self.kl_reg, 1. * self.epoch / self.n_epochs)

        meter = AverageMeter()
        for batch in self.train_user_loader:
            batch_users = batch[0]

            scores, kl, l2_norm_sq = self.model.ml_forward(batch_users)
            log_probs = F.log_softmax(scores, dim=1)
            batch_ids = batch_users.cpu().numpy()
            targets = self.data_mat[batch_ids, :]
            targets = get_sparse_tensor(targets, self.device).to_dense()
            # Negative multinomial log-likelihood over observed interactions.
            ml_loss = -torch.sum(targets * log_probs, dim=1).mean()

            reg_loss = kl_weight * kl.mean() + self.l2_reg * l2_norm_sq.mean()
            total_loss = ml_loss + reg_loss
            self.opt.zero_grad()
            total_loss.backward()
            self.opt.step()
            meter.update(total_loss.item(), batch_ids.shape[0])
        return meter.avg
Example no. 6
0
    def generate_feat(self, dataset, is_updating=False, ranking_metric=None):
        """Build the sparse feature matrix over all users and items.

        Rows are users followed by items; columns are core users, core items,
        and two bias columns (one shared by all users, one by all items).
        Returns (feat, user_map, item_map, row_sum).
        """
        if is_updating:
            # Reuse the core-node maps computed earlier.
            user_map = self.user_map
            item_map = self.item_map
        else:
            if self.feature_ratio < 1.:
                # Keep only the top-ranked fraction of users/items as features.
                ranked_users, ranked_items = graph_rank_nodes(dataset, ranking_metric)
                core_users = ranked_users[:int(self.n_users * self.feature_ratio)]
                core_items = ranked_items[:int(self.n_items * self.feature_ratio)]
            else:
                core_users = np.arange(self.n_users, dtype=np.int64)
                core_items = np.arange(self.n_items, dtype=np.int64)

            user_map = {user: idx for idx, user in enumerate(core_users)}
            item_map = {item: idx for idx, item in enumerate(core_items)}

        user_dim, item_dim = len(user_map), len(item_map)
        indices = []
        for user, item in dataset.train_array:
            # User rows get core-item columns; item rows get core-user columns.
            if item in item_map:
                indices.append([user, user_dim + item_map[item]])
            if user in user_map:
                indices.append([self.n_users + item, user_map[user]])
        # Bias columns: every user shares one, every item shares the other.
        for user in range(self.n_users):
            indices.append([user, user_dim + item_dim])
        for item in range(self.n_items):
            indices.append([self.n_users + item, user_dim + item_dim + 1])

        index_arr = np.array(indices)
        feat = sp.coo_matrix((np.ones((len(indices),)), index_arr.T),
                             shape=(self.n_users + self.n_items, user_dim + item_dim + 2),
                             dtype=np.float32).tocsr()
        row_sum = torch.tensor(np.array(np.sum(feat, axis=1)).squeeze(),
                               dtype=torch.float32, device=self.device)
        return get_sparse_tensor(feat, self.device), user_map, item_map, row_sum
Example no. 7
0
 def generate_feat(self, dataset):
     """Feature matrix: adjacency columns restricted to old users and old items."""
     adjacency = generate_daj_mat(dataset)
     old_user_cols = adjacency[:, :self.n_old_users]
     old_item_cols = adjacency[:, self.n_users:self.n_users + self.n_old_items]
     features = sp.hstack([old_user_cols, old_item_cols])
     return get_sparse_tensor(features, self.device)