def main():
    """Train on the dropped-interaction split, then evaluate on the full interactions."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    device = torch.device('cuda')
    dataset_config, model_config, trainer_config = get_gowalla_config(device)[5]
    # Point the loader at the variant with interactions dropped.
    dataset_config['path'] = dataset_config['path'][:-4] + '0_dropit'

    writer = SummaryWriter(log_path)
    dropped_dataset = get_dataset(dataset_config)
    model = get_model(model_config, dropped_dataset)
    trainer = get_trainer(trainer_config, dropped_dataset, model)
    trainer.train(verbose=True, writer=writer)
    writer.close()

    # Strip the '_dropit' suffix to reload the full-interaction dataset.
    dataset_config['path'] = dataset_config['path'][:-7]
    full_dataset = get_dataset(dataset_config)
    model.config['dataset'] = full_dataset
    trainer = get_trainer(trainer_config, full_dataset, model)

    results, _ = trainer.eval('test')
    print('Previous interactions test result. {:s}'.format(results))

    # Rebuild the model's interaction matrix from the full data, then re-evaluate.
    model.normalized_data_mat = model.get_data_mat(full_dataset)
    results, _ = trainer.eval('test')
    print('Updated interactions test result. {:s}'.format(results))
def main():
    """Train on the dropped-user/item split, then run inductive evaluation on the full data."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    device = torch.device('cuda')
    dataset_config, model_config, trainer_config = get_gowalla_config(device)[6]
    dataset_config['path'] = dataset_config['path'][:-4] + '0_dropui'

    writer = SummaryWriter(log_path)
    sub_dataset = get_dataset(dataset_config)
    model = get_model(model_config, sub_dataset)
    trainer = get_trainer(trainer_config, sub_dataset, model)
    trainer.train(verbose=True, writer=writer)
    writer.close()

    # Strip the '_dropui' suffix and reload the complete dataset.
    dataset_config['path'] = dataset_config['path'][:-7]
    full_dataset = get_dataset(dataset_config)
    model.config['dataset'] = full_dataset
    model.n_users, model.n_items = full_dataset.n_users, full_dataset.n_items
    # Regenerate the model's feature matrix for the enlarged user/item sets.
    model.feat_mat, _, _, model.row_sum = model.generate_feat(full_dataset, is_updating=True)
    model.update_feat_mat()

    trainer = get_trainer(trainer_config, full_dataset, model)
    trainer.inductive_eval(sub_dataset.n_users, sub_dataset.n_items)
def main():
    """Evaluate a model built on dropped interactions against the full interactions."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    device = torch.device('cuda')
    dataset_config, model_config, trainer_config = get_gowalla_config(device)[3]
    dataset_config['path'] = dataset_config['path'][:-4] + '0_dropit'
    dropped_dataset = get_dataset(dataset_config)
    model = get_model(model_config, dropped_dataset)

    # Strip the '_dropit' suffix and reload the full-interaction dataset.
    dataset_config['path'] = dataset_config['path'][:-7]
    full_dataset = get_dataset(dataset_config)
    model.config['dataset'] = full_dataset
    trainer = get_trainer(trainer_config, full_dataset, model)

    results, _ = trainer.eval('test')
    print('Previous interactions test result. {:s}'.format(results))

    # Rebuild the user-item matrix from the full training interactions.
    values = np.ones((len(full_dataset.train_array),))
    indices = np.array(full_dataset.train_array).T
    model.data_mat = sp.coo_matrix(
        (values, indices),
        shape=(full_dataset.n_users, full_dataset.n_items),
        dtype=np.float32).tocsr()

    results, _ = trainer.eval('test')
    print('Updated interactions test result. {:s}'.format(results))
def main():
    """Inductively evaluate an item-similarity model after new users/items appear."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    device = torch.device('cuda')
    dataset_config, model_config, trainer_config = get_gowalla_config(device)[3]
    dataset_config['path'] = dataset_config['path'][:-4] + '0_dropui'
    sub_dataset = get_dataset(dataset_config)
    model = get_model(model_config, sub_dataset)

    # Strip the '_dropui' suffix and reload the complete dataset.
    dataset_config['path'] = dataset_config['path'][:-7]
    full_dataset = get_dataset(dataset_config)
    model.config['dataset'] = full_dataset
    model.n_users, model.n_items = full_dataset.n_users, full_dataset.n_items

    # Rebuild the user-item matrix from the full training interactions.
    values = np.ones((len(full_dataset.train_array),))
    indices = np.array(full_dataset.train_array).T
    model.data_mat = sp.coo_matrix(
        (values, indices),
        shape=(full_dataset.n_users, full_dataset.n_items),
        dtype=np.float32).tocsr()

    # Pad the item-item similarity matrix to the enlarged item count;
    # entries for the new items remain zero.
    old_sim = model.sim_mat.tocoo()
    model.sim_mat = sp.coo_matrix(
        (old_sim.data, (old_sim.row, old_sim.col)),
        shape=(full_dataset.n_items, full_dataset.n_items)).tocsr()

    trainer = get_trainer(trainer_config, full_dataset, model)
    trainer.inductive_eval(sub_dataset.n_users, sub_dataset.n_items)
def main():
    """Grid-search learning rate, L2 regularization and dropout; report the best NDCG."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    search_space = ParameterGrid({
        'lr': [1.e-3],
        'l2_reg': [0., 1.e-5],
        'dropout': [0.3, 0.5, 0.7, 0.9],
    })
    best_params, max_ndcg = None, -np.inf
    for params in search_space:
        ndcg = fitness(params['lr'], params['l2_reg'], params['dropout'])
        print('NDCG: {:.3f}, Parameters: {:s}'.format(ndcg, str(params)))
        if ndcg > max_ndcg:
            max_ndcg, best_params = ndcg, params
            print('Maximum NDCG!')
    print('Maximum NDCG: {:.3f}, Best Parameters: {:s}'.format(max_ndcg, str(best_params)))
def main():
    """Grid-search the neighborhood size k and report the best NDCG."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    search_space = ParameterGrid({'k': [10, 50, 200, 1000]})
    best_params, max_ndcg = None, -np.inf
    for params in search_space:
        ndcg = fitness(params['k'])
        print('NDCG: {:.3f}, Parameters: {:s}'.format(ndcg, str(params)))
        if ndcg > max_ndcg:
            max_ndcg, best_params = ndcg, params
            print('Maximum NDCG!')
    print('Maximum NDCG: {:.3f}, Best Parameters: {:s}'.format(max_ndcg, str(best_params)))
def main():
    """Train on Gowalla data split 1 and print the test-set metrics."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    device = torch.device('cuda')
    dataset_config, model_config, trainer_config = get_gowalla_config(device)[2]
    # Redirect the data path to split 1.
    dataset_config['path'] = dataset_config['path'][:-4] + str(1)

    writer = SummaryWriter(log_path)
    dataset = get_dataset(dataset_config)
    model = get_model(model_config, dataset)
    trainer = get_trainer(trainer_config, dataset, model)
    trainer.train(verbose=True, writer=writer)
    writer.close()

    results, _ = trainer.eval('test')
    print('Test result. {:s}'.format(results))
def main():
    """Plot low-rank approximation errors for users and items under three node rankings."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    device = torch.device('cuda')
    dataset_config, model_config, trainer_config = get_gowalla_config(device)[2]
    dataset_config['path'] = dataset_config['path'][:-4] + str(1)
    dataset = get_dataset(dataset_config)

    # User-item sub-block of the full adjacency matrix.
    adj = generate_daj_mat(dataset)
    part_adj = adj[:dataset.n_users, dataset.n_users:]
    part_adj_tensor = get_sparse_tensor(part_adj, 'cpu')
    with torch.no_grad():
        u, s, v = torch.svd_lowrank(part_adj_tensor, 64)

    # Rank nodes by three criteria, keeping the (sort, degree, page_rank) order.
    ranked_users, ranked_items = [], []
    for method in ['sort', 'degree', 'page_rank']:
        users, items = graph_rank_nodes(dataset, method)
        ranked_users.append(users)
        ranked_items.append(items)
    ranked_users, ranked_items = tuple(ranked_users), tuple(ranked_items)

    pdf = PdfPages('figure_5.pdf')
    fig, ax = plt.subplots(nrows=1, ncols=2, constrained_layout=True, figsize=(11, 4))
    axes = ax.flatten()
    plot_error(part_adj, u.cpu().numpy(), ranked_users, axes[0], device, 'users')
    plot_error(part_adj.T, v.cpu().numpy(), ranked_items, axes[1], device, 'items')
    pdf.savefig()
    plt.close(fig)
    pdf.close()
def main():
    """Train an inductive model on the reduced dataset, then compare three
    setups on the full dataset: the inductively updated model, a
    transductively trained checkpoint, and a popularity baseline.
    """
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)
    device = torch.device('cuda')
    config = get_gowalla_config(device)
    dataset_config, model_config, trainer_config = config[2]
    # Train on the variant with users/items dropped ('0_dropui').
    dataset_config['path'] = dataset_config['path'][:-4] + '0_dropui'
    writer = SummaryWriter(log_path)
    dataset = get_dataset(dataset_config)
    model = get_model(model_config, dataset)
    trainer = get_trainer(trainer_config, dataset, model)
    trainer.train(verbose=True, writer=writer)
    writer.close()
    # Strip the '_dropui' suffix and reload the complete dataset.
    dataset_config['path'] = dataset_config['path'][:-7]
    new_dataset = get_dataset(dataset_config)
    # Resize the model to the enlarged user/item sets and rebuild its
    # graph/feature structures without retraining.
    model.config['dataset'] = new_dataset
    model.n_users, model.n_items = new_dataset.n_users, new_dataset.n_items
    model.norm_adj = model.generate_graph(new_dataset)
    model.feat_mat, _, _, model.row_sum = model.generate_feat(new_dataset, is_updating=True)
    model.update_feat_mat()
    trainer = get_trainer(trainer_config, new_dataset, model)
    print('Inductive results.')
    trainer.inductive_eval(dataset.n_users, dataset.n_items)
    # Baseline 1: model trained transductively on the full data.
    # NOTE(review): 'checkpoints/...' looks like a placeholder path — point it
    # at a real checkpoint before running.
    model = get_model(model_config, new_dataset)
    model.load('checkpoints/...')
    trainer = get_trainer(trainer_config, new_dataset, model)
    print('Transductive model results.')
    trainer.inductive_eval(dataset.n_users, dataset.n_items)
    # Baseline 2: popularity-based recommendation with the basic trainer.
    model_config['name'] = 'Popularity'
    trainer_config['name'] = 'BasicTrainer'
    model = get_model(model_config, new_dataset)
    trainer = get_trainer(trainer_config, new_dataset, model)
    print('Popularity model results.')
    trainer.inductive_eval(dataset.n_users, dataset.n_items)
def main():
    """Inductively evaluate an embedding model: train on the reduced dataset,
    then grow the embedding table to cover the new users/items and evaluate.
    """
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)
    device = torch.device('cuda')
    config = get_gowalla_config(device)
    dataset_config, model_config, trainer_config = config[7]
    # Train on the variant with users/items dropped ('0_dropui').
    dataset_config['path'] = dataset_config['path'][:-4] + '0_dropui'
    writer = SummaryWriter(log_path)
    dataset = get_dataset(dataset_config)
    model = get_model(model_config, dataset)
    trainer = get_trainer(trainer_config, dataset, model)
    trainer.train(verbose=True, writer=writer)
    writer.close()
    # Strip the '_dropui' suffix and reload the complete dataset.
    dataset_config['path'] = dataset_config['path'][:-7]
    new_dataset = get_dataset(dataset_config)
    model.config['dataset'] = new_dataset
    model.n_users, model.n_items = new_dataset.n_users, new_dataset.n_items
    model.norm_adj = model.generate_graph(new_dataset)
    with torch.no_grad():
        # Rebuild the embedding table at the new size. The layout appears to be
        # [users | items | 3 extra rows]; the last 3 rows are copied verbatim
        # from the old table (presumably special/global embeddings — TODO confirm).
        old_embedding = model.embedding.weight
        model.embedding = torch.nn.Embedding(new_dataset.n_users + new_dataset.n_items + 3,
                                             model.embedding_size, device=device)
        # Initialize every row to the mean of the old non-special embeddings ...
        model.embedding.weight[:, :] = old_embedding[:-3, :].mean(dim=0)[None, :].expand(model.embedding.weight.shape)
        model.embedding.weight[-3:, :] = old_embedding[-3:, :]
        # ... then copy the trained embeddings of users/items that already
        # existed in the reduced dataset into their new index positions.
        model.embedding.weight[:dataset.n_users, :] = old_embedding[:dataset.n_users, :]
        model.embedding.weight[new_dataset.n_users:new_dataset.n_users + dataset.n_items, :] = \
            old_embedding[dataset.n_users:-3, :]
    trainer = get_trainer(trainer_config, new_dataset, model)
    trainer.inductive_eval(dataset.n_users, dataset.n_items)
def main():
    """Inductive evaluation via a retrain-free update of the normalized interaction matrix."""
    log_path = __file__[:-3]  # strip '.py' to get the run/log directory
    init_run(log_path, 2021)

    device = torch.device('cuda')
    dataset_config, model_config, trainer_config = get_gowalla_config(device)[5]
    dataset_config['path'] = dataset_config['path'][:-4] + '0_dropui'

    writer = SummaryWriter(log_path)
    sub_dataset = get_dataset(dataset_config)
    model = get_model(model_config, sub_dataset)
    trainer = get_trainer(trainer_config, sub_dataset, model)
    trainer.train(verbose=True, writer=writer)
    writer.close()

    # Strip the '_dropui' suffix and reload the complete dataset.
    dataset_config['path'] = dataset_config['path'][:-7]
    full_dataset = get_dataset(dataset_config)
    model.config['dataset'] = full_dataset
    model.n_users, model.n_items = full_dataset.n_users, full_dataset.n_items
    # Keep only the columns of items seen during training, then L2-normalize rows.
    data_mat = model.get_data_mat(full_dataset)[:, :sub_dataset.n_items]
    model.normalized_data_mat = normalize(data_mat, axis=1, norm='l2')

    trainer = get_trainer(trainer_config, full_dataset, model)
    trainer.inductive_eval(sub_dataset.n_users, sub_dataset.n_items)