def main():
    """Evaluate a pretrained DeeperGCN checkpoint on a node-property dataset (full batch, CPU)."""
    args = ArgsInit().args

    dataset = PygNodePropPredDataset(name=args.dataset)
    graph = dataset[0]

    if args.self_loop:
        graph.edge_index = add_self_loops(edge_index=graph.edge_index,
                                          num_nodes=graph.num_nodes)[0]

    split_idx = dataset.get_idx_split()
    evaluator = Evaluator(args.dataset)

    args.in_channels = graph.x.size(-1)
    args.num_tasks = dataset.num_classes
    print(args)

    model = DeeperGCN(args)
    print(model)

    # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only host;
    # this script never moves the model to a device, so CPU is the right target.
    ckpt = torch.load(args.model_load_path, map_location=torch.device('cpu'))
    model.load_state_dict(ckpt['model_state_dict'])

    result = test(model, graph.x, graph.edge_index, graph.y, split_idx, evaluator)
    print(result)
    model.print_params(final=True)
def main():
    """Run multi-partition evaluation of a saved DeeperGCN model on an OGBN dataset."""
    args = ArgsInit().args

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device("cpu")

    dataset = OGBNDataset(dataset_name=args.dataset)
    args.num_tasks = dataset.num_tasks
    args.nf_path = dataset.extract_node_features(args.aggr)

    evaluator = Evaluator(args.dataset)

    # Pre-build one random partition of the graph per evaluation pass.
    valid_data_list = []
    for _ in range(args.num_evals):
        parts = dataset.random_partition_graph(
            dataset.total_no_of_nodes,
            cluster_number=args.valid_cluster_number)
        valid_data_list.append(
            dataset.generate_sub_graphs(parts,
                                        cluster_number=args.valid_cluster_number))

    model = DeeperGCN(args)
    model.load_state_dict(torch.load(args.model_load_path)['model_state_dict'])
    model.to(device)

    result = multi_evaluate(valid_data_list, dataset, model, evaluator, device)
    print(result)
    model.print_params(final=True)
def main():
    """Evaluate a saved DeeperGCN checkpoint on a graph-property-prediction dataset."""
    args = ArgsInit().args

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    dataset = PygGraphPropPredDataset(name=args.dataset)
    args.num_tasks = dataset.num_tasks
    print(args)

    if args.feature == 'full':
        pass
    elif args.feature == 'simple':
        print('using simple feature')
        # only retain the top two node/edge features
        dataset.data.x = dataset.data.x[:, :2]
        dataset.data.edge_attr = dataset.data.edge_attr[:, :2]

    split_idx = dataset.get_idx_split()
    evaluator = Evaluator(args.dataset)

    # One non-shuffled loader per split; evaluation order does not matter here.
    loaders = {
        split: DataLoader(dataset[split_idx[split]],
                          batch_size=args.batch_size,
                          shuffle=False,
                          num_workers=args.num_workers)
        for split in ("train", "valid", "test")
    }

    model = DeeperGCN(args)
    model.load_state_dict(torch.load(args.model_load_path)['model_state_dict'])
    model.to(device)

    metric = dataset.eval_metric
    print({
        'Train': eval(model, device, loaders["train"], evaluator)[metric],
        'Validation': eval(model, device, loaders["valid"], evaluator)[metric],
        'Test': eval(model, device, loaders["test"], evaluator)[metric]
    })
    model.print_params(final=True)
def main():
    """Evaluate a saved DeeperGCN checkpoint on a graph classification dataset (accuracy)."""
    args = ArgsInit().args

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    if args.not_extract_node_feature:
        # Graphs arrive without node features; attach all-zero placeholders.
        dataset = PygGraphPropPredDataset(name=args.dataset, transform=add_zeros)
    else:
        # Derive node features by aggregating incident edge features.
        extract_node_feature_func = partial(extract_node_feature, reduce=args.aggr)
        dataset = PygGraphPropPredDataset(name=args.dataset,
                                          transform=extract_node_feature_func)

    args.num_tasks = dataset.num_classes
    evaluator = Evaluator(args.dataset)
    split_idx = dataset.get_idx_split()

    def make_loader(split):
        # Evaluation only, so no shuffling for any split.
        return DataLoader(dataset[split_idx[split]],
                          batch_size=args.batch_size,
                          shuffle=False,
                          num_workers=args.num_workers)

    train_loader = make_loader("train")
    valid_loader = make_loader("valid")
    test_loader = make_loader("test")

    print(args)

    model = DeeperGCN(args)
    model.load_state_dict(torch.load(args.model_load_path)['model_state_dict'])
    model.to(device)

    print({
        'Train': eval(model, device, train_loader, evaluator),
        'Validation': eval(model, device, valid_loader, evaluator),
        'Test': eval(model, device, test_loader, evaluator)
    })
    model.print_params(final=True)
def main():
    """Full-batch evaluation of a saved DeeperGCN checkpoint on an OGB node dataset."""
    args = ArgsInit().args

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    dataset = PygNodePropPredDataset(name=args.dataset)
    data = dataset[0]
    split_idx = dataset.get_idx_split()
    evaluator = Evaluator(args.dataset)

    x = data.x.to(device)
    y_true = data.y.to(device)

    # Symmetrize the graph, then optionally add self loops.
    edge_index = to_undirected(data.edge_index.to(device), data.num_nodes)
    if args.self_loop:
        edge_index = add_self_loops(edge_index, num_nodes=data.num_nodes)[0]

    args.in_channels = data.x.size(-1)
    args.num_tasks = dataset.num_classes
    print(args)

    model = DeeperGCN(args)
    model.load_state_dict(torch.load(args.model_load_path)['model_state_dict'])
    model.to(device)

    train_accuracy, valid_accuracy, test_accuracy = test(
        model, x, edge_index, y_true, split_idx, evaluator)
    print({
        'Train': train_accuracy,
        'Validation': valid_accuracy,
        'Test': test_accuracy
    })
    model.print_params(final=True)
def main():
    """Train DeeperGCN with FLAG augmentation on a graph-property dataset, tracking best results."""
    args = ArgsInit().save_exp()

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    # Kept for the (currently disabled) checkpoint-saving path.
    sub_dir = 'BS_{}-NF_{}'.format(args.batch_size, args.feature)

    dataset = PygGraphPropPredDataset(name=args.dataset)
    args.num_tasks = dataset.num_tasks
    logging.info('%s' % args)

    if args.feature == 'full':
        pass
    elif args.feature == 'simple':
        print('using simple feature')
        # only retain the top two node/edge features
        dataset.data.x = dataset.data.x[:, :2]
        dataset.data.edge_attr = dataset.data.edge_attr[:, :2]

    evaluator = Evaluator(args.dataset)
    split_idx = dataset.get_idx_split()

    train_loader = DataLoader(dataset[split_idx["train"]],
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers)
    valid_loader = DataLoader(dataset[split_idx["valid"]],
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.num_workers)
    test_loader = DataLoader(dataset[split_idx["test"]],
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    model = DeeperGCN(args).to(device)
    logging.info(model)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    results = {
        'highest_valid': 0,
        'final_train': 0,
        'final_test': 0,
        'highest_train': 0
    }

    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        logging.info("=====Epoch {}".format(epoch))
        logging.info('Training...')
        epoch_loss = train_flag(model, device, train_loader, optimizer,
                                dataset.task_type, args)

        logging.info('Evaluating...')
        metric = dataset.eval_metric
        train_result = eval(model, device, train_loader, evaluator)[metric]
        valid_result = eval(model, device, valid_loader, evaluator)[metric]
        test_result = eval(model, device, test_loader, evaluator)[metric]
        logging.info({
            'Train': train_result,
            'Validation': valid_result,
            'Test': test_result
        })

        model.print_params(epoch=epoch)

        if train_result > results['highest_train']:
            results['highest_train'] = train_result
        if valid_result > results['highest_valid']:
            # Model selection on validation: remember the paired train/test scores.
            results['highest_valid'] = valid_result
            results['final_train'] = train_result
            results['final_test'] = test_result

        logging.info("%s" % results)

    total_time = time.time() - start_time
    logging.info('Total time: {}'.format(
        time.strftime('%H:%M:%S', time.gmtime(total_time))))
def main():
    """Train DeeperGCN on a node-property dataset with random graph partitioning.

    Trains with FLAG augmentation on randomly partitioned subgraphs, evaluates
    every ``args.eval_epochs`` epochs with partition-based inference, logs
    curves to TensorBoard, and checkpoints the best-validation model.
    """
    args = ArgsInit().save_exp()

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    dataset = PygNodePropPredDataset(name=args.dataset, root=args.data_folder)
    graph = dataset[0]

    adj = SparseTensor(row=graph.edge_index[0], col=graph.edge_index[1])

    if args.self_loop:
        # Keep the sparse adjacency and the edge_index view consistent.
        adj = adj.set_diag()
        graph.edge_index = add_self_loops(edge_index=graph.edge_index,
                                          num_nodes=graph.num_nodes)[0]

    split_idx = dataset.get_idx_split()
    train_idx = split_idx["train"].tolist()

    evaluator = Evaluator(args.dataset)

    sub_dir = 'random-train_{}-full_batch_test'.format(args.cluster_number)
    logging.info(sub_dir)

    log_dir = os.path.join(args.save, "tensorboard/")
    writer = SummaryWriter(log_dir=log_dir)

    args.in_channels = graph.x.size(-1)
    args.num_tasks = dataset.num_classes
    logging.info('%s' % args)

    model = DeeperGCN(args).to(device)
    logging.info(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    results = {
        'highest_valid': 0,
        'final_train': 0,
        'final_test': 0,
        'highest_train': 0
    }

    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        # Re-partition every epoch so mini-batches differ across epochs.
        parts = random_partition_graph(graph.num_nodes,
                                       cluster_number=args.cluster_number)
        data = generate_sub_graphs(adj, parts,
                                   cluster_number=args.cluster_number)

        epoch_loss = train_flag(data, model, graph.x, graph.y, train_idx,
                                optimizer, device, args)
        logging.info('Epoch {}, training loss {:.4f}'.format(
            epoch, epoch_loss))
        writer.add_scalar("Loss/train", epoch_loss, epoch)

        model.print_params(epoch=epoch)

        if epoch % args.eval_epochs == 0:
            logging.info(f'---- Evaluating at epoch {epoch} ----')
            res = test_with_partition(model,
                                      graph,
                                      adj,
                                      split_idx,
                                      num_clusters=args.eval_cluster_number,
                                      partition_method=args.partition_method,
                                      evaluator=evaluator,
                                      device=device)
            logging.info(res)
            logging.info(f"---------------------------------")
            train_accuracy, valid_accuracy, test_accuracy = res[
                "train_acc"], res["valid_acc"], res["test_acc"]

            # BUG FIX: pass the epoch as global_step; without it each point is
            # logged at an auto-increasing step unrelated to the training epoch,
            # so accuracy curves cannot be aligned with the loss curve.
            writer.add_scalar("Acc/train", train_accuracy, epoch)
            writer.add_scalar("Acc/dev", valid_accuracy, epoch)

            if train_accuracy > results['highest_train']:
                results['highest_train'] = train_accuracy
            if valid_accuracy > results['highest_valid']:
                # New best validation score: record paired metrics and checkpoint.
                results['highest_valid'] = valid_accuracy
                results['final_train'] = train_accuracy
                results['final_test'] = test_accuracy
                save_ckpt(model, optimizer, round(epoch_loss, 4), epoch,
                          args.model_save_path, sub_dir,
                          name_post='valid_best')
                logging.info(
                    f"Saved better model to checkpoint folder {args.model_save_path}"
                )

        logging.info("%s" % results)

    # Flush pending TensorBoard events before exiting.
    writer.close()

    end_time = time.time()
    total_time = end_time - start_time
    logging.info('Total time: {}'.format(
        time.strftime('%H:%M:%S', time.gmtime(total_time))))
def main():
    """Train DeeperGCN on a graph classification dataset with cross-entropy loss."""
    args = ArgsInit().save_exp()

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    sub_dir = 'BS_{}'.format(args.batch_size)

    if args.not_extract_node_feature:
        dataset = PygGraphPropPredDataset(name=args.dataset,
                                          transform=add_zeros)
    else:
        # Dispatch on the requested edge-feature aggregation.
        transform_by_aggr = {
            'add': extract_node_feature_add,
            'mean': extract_node_feature_mean,
            'max': extract_node_feature_max,
        }
        if args.aggr not in transform_by_aggr:
            raise Exception('Unknown Aggregation Type')
        dataset = PygGraphPropPredDataset(name=args.dataset,
                                          transform=transform_by_aggr[args.aggr])
        sub_dir = sub_dir + '-NF_{}'.format(args.aggr)

    args.num_tasks = dataset.num_classes
    evaluator = Evaluator(args.dataset)
    logging.info('%s' % args)

    split_idx = dataset.get_idx_split()
    train_loader = DataLoader(dataset[split_idx["train"]],
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers)
    valid_loader = DataLoader(dataset[split_idx["valid"]],
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.num_workers)
    test_loader = DataLoader(dataset[split_idx["test"]],
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    model = DeeperGCN(args).to(device)
    logging.info(model)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    results = {
        'highest_valid': 0,
        'final_train': 0,
        'final_test': 0,
        'highest_train': 0
    }

    start_time = time.time()
    evaluate = True
    for epoch in range(1, args.epochs + 1):
        logging.info("=====Epoch {}".format(epoch))
        logging.info('Training...')
        epoch_loss = train(model, device, train_loader, optimizer, criterion)

        # Deep models are expensive to evaluate; only do so every eval_steps epochs.
        if args.num_layers > args.num_layers_threshold:
            evaluate = epoch % args.eval_steps == 0

        model.print_params(epoch=epoch)

        if evaluate:
            logging.info('Evaluating...')
            train_accuracy = eval(model, device, train_loader, evaluator)
            valid_accuracy = eval(model, device, valid_loader, evaluator)
            test_accuracy = eval(model, device, test_loader, evaluator)
            logging.info({
                'Train': train_accuracy,
                'Validation': valid_accuracy,
                'Test': test_accuracy
            })

            if train_accuracy > results['highest_train']:
                results['highest_train'] = train_accuracy
            if valid_accuracy > results['highest_valid']:
                results['highest_valid'] = valid_accuracy
                results['final_train'] = train_accuracy
                results['final_test'] = test_accuracy
                save_ckpt(model, optimizer, round(epoch_loss, 4), epoch,
                          args.model_save_path, sub_dir,
                          name_post='valid_best')

        logging.info("%s" % results)

    total_time = time.time() - start_time
    logging.info('Total time: {}'.format(
        time.strftime('%H:%M:%S', time.gmtime(total_time))))
def main():
    """Train DeeperGCN full-batch on an OGB node dataset, checkpointing the best model."""
    args = ArgsInit().save_exp()

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    dataset = PygNodePropPredDataset(name=args.dataset)
    data = dataset[0]
    split_idx = dataset.get_idx_split()
    evaluator = Evaluator(args.dataset)

    x = data.x.to(device)
    y_true = data.y.to(device)
    train_idx = split_idx['train'].to(device)

    # Symmetrize the graph, then optionally add self loops.
    edge_index = to_undirected(data.edge_index.to(device), data.num_nodes)
    if args.self_loop:
        edge_index = add_self_loops(edge_index, num_nodes=data.num_nodes)[0]

    sub_dir = 'SL_{}'.format(args.self_loop)

    args.in_channels = data.x.size(-1)
    args.num_tasks = dataset.num_classes
    logging.info('%s' % args)

    model = DeeperGCN(args).to(device)
    logging.info(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    results = {
        'highest_valid': 0,
        'final_train': 0,
        'final_test': 0,
        'highest_train': 0
    }

    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        epoch_loss = train(model, x, edge_index, y_true, train_idx, optimizer)
        logging.info('Epoch {}, training loss {:.4f}'.format(
            epoch, epoch_loss))
        model.print_params(epoch=epoch)

        result = test(model, x, edge_index, y_true, split_idx, evaluator)
        logging.info(result)
        train_accuracy, valid_accuracy, test_accuracy = result

        if train_accuracy > results['highest_train']:
            results['highest_train'] = train_accuracy
        if valid_accuracy > results['highest_valid']:
            # New best validation accuracy: record paired metrics and checkpoint.
            results['highest_valid'] = valid_accuracy
            results['final_train'] = train_accuracy
            results['final_test'] = test_accuracy
            save_ckpt(model, optimizer, round(epoch_loss, 4), epoch,
                      args.model_save_path, sub_dir, name_post='valid_best')

        logging.info("%s" % results)

    total_time = time.time() - start_time
    logging.info('Total time: {}'.format(
        time.strftime('%H:%M:%S', time.gmtime(total_time))))
def main():
    """Train DeeperGCN on an OGBN dataset with random partitioning and ROC-AUC model selection."""
    args = ArgsInit().save_exp()

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device("cpu")
    logging.info('%s' % device)

    dataset = OGBNDataset(dataset_name=args.dataset)
    # extract initial node features
    nf_path = dataset.extract_node_features(args.aggr)

    args.num_tasks = dataset.num_tasks
    args.nf_path = nf_path
    logging.info('%s' % args)

    evaluator = Evaluator(args.dataset)
    criterion = torch.nn.BCEWithLogitsLoss()

    # Pre-build a fixed set of validation partitions, one per evaluation pass.
    valid_data_list = []
    for _ in range(args.num_evals):
        parts = dataset.random_partition_graph(
            dataset.total_no_of_nodes,
            cluster_number=args.valid_cluster_number)
        valid_data_list.append(
            dataset.generate_sub_graphs(
                parts, cluster_number=args.valid_cluster_number))

    sub_dir = 'random-train_{}-test_{}-num_evals_{}'.format(
        args.cluster_number, args.valid_cluster_number, args.num_evals)
    logging.info(sub_dir)

    model = DeeperGCN(args).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    results = {
        'highest_valid': 0,
        'final_train': 0,
        'final_test': 0,
        'highest_train': 0
    }

    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        # do random partition every epoch
        train_parts = dataset.random_partition_graph(
            dataset.total_no_of_nodes, cluster_number=args.cluster_number)
        data = dataset.generate_sub_graphs(
            train_parts, cluster_number=args.cluster_number, ifmask=True)

        epoch_loss = train(data, dataset, model, optimizer, criterion, device)
        logging.info('Epoch {}, training loss {:.4f}'.format(
            epoch, epoch_loss))
        model.print_params(epoch=epoch)

        result = multi_evaluate(valid_data_list, dataset, model, evaluator,
                                device)
        # Log the full per-split result only periodically to keep output readable.
        if epoch % 5 == 0:
            logging.info('%s' % result)

        train_result = result['train']['rocauc']
        valid_result = result['valid']['rocauc']
        test_result = result['test']['rocauc']

        if valid_result > results['highest_valid']:
            # New best validation ROC-AUC: record paired metrics and checkpoint.
            results['highest_valid'] = valid_result
            results['final_train'] = train_result
            results['final_test'] = test_result
            save_ckpt(model, optimizer, round(epoch_loss, 4), epoch,
                      args.model_save_path, sub_dir, name_post='valid_best')

        if train_result > results['highest_train']:
            results['highest_train'] = train_result

        logging.info("%s" % results)

    total_time = time.time() - start_time
    logging.info('Total time: {}'.format(
        time.strftime('%H:%M:%S', time.gmtime(total_time))))
def main():
    """Train a DeeperGCN encoder plus LinkPredictor on an OGB link-prediction dataset."""
    args = ArgsInit().save_exp()

    if args.use_tensor_board:
        writer = SummaryWriter(log_dir=args.save)

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    dataset = PygLinkPropPredDataset(name=args.dataset)
    data = dataset[0]
    # Data(edge_index=[2, 2358104], edge_weight=[2358104, 1], edge_year=[2358104, 1], x=[235868, 128])

    split_edge = dataset.get_edge_split()
    evaluator = Evaluator(args.dataset)

    x = data.x.to(device)
    edge_index = data.edge_index.to(device)

    args.in_channels = data.x.size(-1)
    args.num_tasks = 1
    logging.info('%s' % args)

    model = DeeperGCN(args).to(device)
    predictor = LinkPredictor(args).to(device)
    logging.info(model)
    logging.info(predictor)

    # One optimizer drives both the encoder and the link predictor.
    optimizer = torch.optim.Adam(list(model.parameters()) +
                                 list(predictor.parameters()),
                                 lr=args.lr)

    # Track each statistic per Hits@K threshold.
    keys = ['highest_valid', 'final_train', 'final_test', 'highest_train']
    hits = ['Hits@10', 'Hits@50', 'Hits@100']
    results = {key: {k: 0 for k in hits} for key in keys}

    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        epoch_loss = train(model, predictor, x, edge_index, split_edge,
                           optimizer, args.batch_size)
        logging.info('Epoch {}, training loss {:.4f}'.format(
            epoch, epoch_loss))
        model.print_params(epoch=epoch)

        result = test(model, predictor, x, edge_index, split_edge, evaluator,
                      args.batch_size)

        for k in hits:
            # return a tuple
            train_result, valid_result, test_result = result[k]

            if args.use_tensor_board and k == 'Hits@50':
                writer.add_scalar('stats/train_loss', epoch_loss, epoch)
                writer.add_scalar('stats/train_Hits@50', train_result, epoch)
                writer.add_scalar('stats/valid_Hits@50', valid_result, epoch)
                writer.add_scalar('stats/test_Hits@50', test_result, epoch)

            if train_result > results['highest_train'][k]:
                results['highest_train'][k] = train_result
            if valid_result > results['highest_valid'][k]:
                # Best validation Hits@K so far: record pairs and checkpoint both nets.
                results['highest_valid'][k] = valid_result
                results['final_train'][k] = train_result
                results['final_test'][k] = test_result
                save_ckpt(model, optimizer, round(epoch_loss, 4), epoch,
                          args.model_save_path, k, name_post='valid_best')
                save_ckpt(predictor, optimizer, round(epoch_loss, 4), epoch,
                          args.model_save_path, k,
                          name_post='valid_best_link_predictor')

        logging.info(result)
        logging.info("%s" % results)

    total_time = time.time() - start_time
    time_used = 'Total time: {}'.format(
        time.strftime('%H:%M:%S', time.gmtime(total_time)))
    logging.info(time_used)
def main():
    """Train DeeperGCN on random graph partitions; run full-batch test at the final epoch."""
    args = ArgsInit().save_exp()

    if args.use_gpu:
        device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device('cpu')

    dataset = PygNodePropPredDataset(name=args.dataset)
    graph = dataset[0]

    adj = SparseTensor(row=graph.edge_index[0], col=graph.edge_index[1])

    if args.self_loop:
        # Keep the sparse adjacency and the edge_index view consistent.
        adj = adj.set_diag()
        graph.edge_index = add_self_loops(edge_index=graph.edge_index,
                                          num_nodes=graph.num_nodes)[0]

    split_idx = dataset.get_idx_split()
    train_idx = split_idx["train"].tolist()

    evaluator = Evaluator(args.dataset)

    sub_dir = 'random-train_{}-full_batch_test'.format(args.cluster_number)
    logging.info(sub_dir)

    args.in_channels = graph.x.size(-1)
    args.num_tasks = dataset.num_classes
    logging.info('%s' % args)

    model = DeeperGCN(args).to(device)
    logging.info(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    results = {
        'highest_valid': 0,
        'final_train': 0,
        'final_test': 0,
        'highest_train': 0
    }

    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        # Re-partition every epoch so mini-batches differ across epochs.
        parts = random_partition_graph(graph.num_nodes,
                                       cluster_number=args.cluster_number)
        data = generate_sub_graphs(adj, parts,
                                   cluster_number=args.cluster_number)

        epoch_loss = train(data, model, graph.x, graph.y, train_idx,
                           optimizer, device)
        logging.info('Epoch {}, training loss {:.4f}'.format(
            epoch, epoch_loss))
        model.print_params(epoch=epoch)

        # Full-batch testing is expensive; only do it once, after the last epoch.
        if epoch == args.epochs:
            result = test(model, graph.x, graph.edge_index, graph.y,
                          split_idx, evaluator)
            logging.info(result)
            train_accuracy, valid_accuracy, test_accuracy = result

            if train_accuracy > results['highest_train']:
                results['highest_train'] = train_accuracy
            if valid_accuracy > results['highest_valid']:
                results['highest_valid'] = valid_accuracy
                results['final_train'] = train_accuracy
                results['final_test'] = test_accuracy
                save_ckpt(model, optimizer, round(epoch_loss, 4), epoch,
                          args.model_save_path, sub_dir,
                          name_post='valid_best')

        logging.info("%s" % results)

    total_time = time.time() - start_time
    logging.info('Total time: {}'.format(
        time.strftime('%H:%M:%S', time.gmtime(total_time))))