def main():
    """Entry point: parse CLI args, pick the device, and run the selected task."""
    prog_args = configs.arg_parse()

    # One TensorBoard writer, shared by every task below.
    log_path = os.path.join(prog_args.logdir, io_utils.gen_prefix(prog_args))
    writer = SummaryWriter(log_path)

    if prog_args.gpu:
        # Restrict CUDA to the requested device id(s).
        os.environ["CUDA_VISIBLE_DEVICES"] = prog_args.cuda
        print("CUDA", prog_args.cuda)
    else:
        print("Using CPU")

    # use --bmname=[dataset_name] for Reddit-Binary, Mutagenicity
    if prog_args.bmname is not None:
        benchmark_task(prog_args, writer=writer)
    elif prog_args.pkl_fname is not None:
        pkl_task(prog_args)
    elif prog_args.dataset is not None:
        # Synthetic / real dataset tasks, dispatched by dataset name.
        # An unknown name falls through and runs nothing, as before.
        tasks = {
            "syn1": syn_task1,
            "syn2": syn_task2,
            "syn3": syn_task3,
            "syn4": syn_task4,
            "syn5": syn_task5,
            "enron": enron_task,
            "ppi_essential": ppi_essential_task,
        }
        task = tasks.get(prog_args.dataset)
        if task is not None:
            task(prog_args, writer=writer)

    writer.close()
def main():
    """Run the noisy-feature filtering experiment, binary or multiclass.

    Timing of the whole run is printed at the end.
    """
    args = configs.arg_parse()
    fix_seed(args.seed)

    # None lets the callee choose its own node indices.
    node_indices = None

    start_time = time.time()

    # Fixed: idiomatic truthiness test instead of `args.multiclass == False`.
    if not args.multiclass:
        filter_useless_features(args.dataset,
                                args.model,
                                args.feat_explainers,
                                args.hops,
                                args.num_samples,
                                args.test_samples,
                                args.K,
                                args.prop_noise_feat,
                                node_indices,
                                args.info,
                                args.hv,
                                args.feat,
                                args.coal,
                                args.g,
                                args.multiclass,
                                args.regu,
                                args.gpu,
                                args.fullempty,
                                args.S,
                                args.seed)
    else:
        # The literal 5 replaces args.K in the multiclass variant —
        # presumably a class/top-K count; TODO confirm against the callee.
        filter_useless_features_multiclass(args.dataset,
                                           args.model,
                                           args.feat_explainers,
                                           args.hops,
                                           args.num_samples,
                                           args.test_samples,
                                           args.prop_noise_feat,
                                           node_indices,
                                           5,
                                           args.info,
                                           args.hv,
                                           args.feat,
                                           args.coal,
                                           args.g,
                                           args.multiclass,
                                           args.regu,
                                           args.gpu,
                                           args.fullempty,
                                           args.S,
                                           args.seed)

    end_time = time.time()
    print('Time: ', end_time - start_time)
def main():
    """Evaluate GraphSVX on the configured dataset with its trained GCN."""
    # Load a configuration
    args = configs.arg_parse()
    fix_seed(args.seed)

    # GPU or CPU — report which device will be used.
    print("CUDA" if args.gpu else "Using CPU")

    # Load dataset and the pre-trained checkpoint for it.
    data = prepare_data(args.dataset, args.train_ratio,
                        args.input_dim, args.seed)
    model = torch.load('models/GCN_model_{}.pth'.format(args.dataset))

    # Evaluate GraphSVX: pick the routine matching the dataset family.
    if args.dataset == 'Mutagenicity':
        eval_Mutagenicity(selected_data(data, args.dataset), model, args)
    elif args.dataset == 'syn6':
        eval_syn6(data, model, args)
    else:
        eval_syn(data, model, args)
def main():
    """Train a model on the configured dataset, report test accuracy, save it.

    The model architecture is chosen by dataset family; all branches share the
    same evaluation and checkpointing logic.
    """
    args = configs.arg_parse()
    fix_seed(args.seed)

    # Load the dataset
    data = prepare_data(args.dataset, args.train_ratio,
                        args.input_dim, args.seed)

    # Define and train the model
    if args.dataset in ['Cora', 'PubMed']:
        # Retrieve the model class and training hyperparameters named after
        # the (dataset, model) pair.
        # NOTE: eval() on args-derived strings is trusted CLI input resolving
        # to module-level names — never feed it external/untrusted data.
        hyperparam = ''.join(['hparams_', args.dataset, '_', args.model])
        param = ''.join(['params_', args.dataset, '_', args.model])
        model = eval(args.model)(input_dim=data.num_features,
                                 output_dim=data.num_classes,
                                 **eval(hyperparam))
        train_and_val(model, data, **eval(param))
    elif args.dataset in ['syn6', 'Mutagenicity']:
        # Graph-classification model.
        input_dims = data.x.shape[-1]
        model = GcnEncoderGraph(input_dims,
                                args.hidden_dim,
                                args.output_dim,
                                data.num_classes,
                                args.num_gc_layers,
                                bn=args.bn,
                                dropout=args.dropout,
                                args=args)
        train_gc(data, model, args)
    else:
        # Node-classification model (pytorch-geometric style data).
        # (Removed an unused `input_dims` local that was never read here.)
        model = GcnEncoderNode(data.num_features,
                               args.hidden_dim,
                               args.output_dim,
                               data.num_classes,
                               args.num_gc_layers,
                               bn=args.bn,
                               dropout=args.dropout,
                               args=args)
        train_syn(data, model, args)

    # Every branch evaluates on the same held-out mask — hoisted out of the
    # branches to remove triplication.
    _, test_acc = evaluate(data, model, data.test_mask)
    print('Test accuracy is {:.4f}'.format(test_acc))

    # Save model: only if absent, unless --save forces an overwrite.
    model_path = 'models/{}_model_{}.pth'.format(args.model, args.dataset)
    if not os.path.exists(model_path) or args.save:
        torch.save(model, model_path)
def main():
    """Configure the compute device from CLI args, then run the medic pipeline."""
    prog_args = configs.arg_parse()

    # Guard clause: CPU path needs no environment setup.
    if not prog_args.gpu:
        print("Using CPU")
        return medic(prog_args)

    # Restrict CUDA to the requested device id(s) before any GPU work starts.
    os.environ["CUDA_VISIBLE_DEVICES"] = prog_args.cuda
    print("CUDA", prog_args.cuda)
    return medic(prog_args)
def main():
    """Load a trained model, check its accuracy, then explain it with GraphSVX."""
    args = configs.arg_parse()
    fix_seed(args.seed)

    # Load the dataset
    data = prepare_data(args.dataset, args.train_ratio,
                        args.input_dim, args.seed)

    # Load the checkpoint saved for this (model, dataset) pair.
    model = torch.load('models/{}_model_{}.pth'.format(args.model, args.dataset))

    # Citation graphs use evaluate(); everything else uses test().
    if args.dataset in ['Cora', 'PubMed']:
        _, test_acc = evaluate(data, model, data.test_mask)
    else:
        test_acc = test(data, model, data.test_mask)
    print('Test accuracy is {:.4f}'.format(test_acc))

    # Explain it with GraphSVX
    explainer = GraphSVX(data, model, args.gpu)

    # Distinguish graph classfication from node classification
    if args.dataset in ['Mutagenicity', 'syn6']:
        explanations = explainer.explain_graphs(args.indexes,
                                                args.hops,
                                                args.num_samples,
                                                args.info,
                                                args.multiclass,
                                                args.fullempty,
                                                args.S,
                                                'graph_classification',
                                                args.feat,
                                                args.coal,
                                                args.g,
                                                args.regu,
                                                True)
    else:
        explanations = explainer.explain(args.indexes,
                                         args.hops,
                                         args.num_samples,
                                         args.info,
                                         args.multiclass,
                                         args.fullempty,
                                         args.S,
                                         args.hv,
                                         args.feat,
                                         args.coal,
                                         args.g,
                                         args.regu,
                                         True)

    print('Sum explanations: ', [np.sum(expl) for expl in explanations])
    print('Base value: ', explainer.base_values)
def main():
    """Run syn tasks at several sizes, logging everything to one writer."""
    prog_args = configs.arg_parse()
    log_path = os.path.join(prog_args.logdir, io_utils.gen_prefix(prog_args))
    writer = SummaryWriter(log_path)

    if prog_args.gpu:
        # Restrict CUDA to the requested device id(s).
        os.environ["CUDA_VISIBLE_DEVICES"] = prog_args.cuda
        print("CUDA", prog_args.cuda)
    else:
        print("Using CPU")

    # syn1 at increasing sizes, with syn2 interleaved — order preserved.
    syn_task1(prog_args, 30, writer=writer)
    syn_task2(prog_args, writer=writer)
    syn_task1(prog_args, 300, writer=writer)
    syn_task1(prog_args, 3000, writer=writer)

    writer.close()
def main():
    """Dispatch to the task matching the dataset named on the command line."""
    prog_args = configs.arg_parse()
    writer = SummaryWriter(
        os.path.join(prog_args.logdir, io_utils.gen_prefix(prog_args)))

    if prog_args.gpu:
        # Restrict CUDA to the requested device id(s).
        os.environ["CUDA_VISIBLE_DEVICES"] = prog_args.cuda
        print("CUDA", prog_args.cuda)
    else:
        print("Using CPU")

    # use --bmname=[dataset_name] for Reddit-Binary, Mutagenicity
    dataset = prog_args.dataset
    if dataset == "FFMpeg":
        FFMpeg(prog_args, writer=writer)
    elif dataset == "fan":
        # NOTE(review): fan() is called without the writer in the original —
        # kept as-is; confirm whether that is intentional.
        fan(prog_args)
    elif dataset == "reveal":
        reveal(prog_args, writer=writer)

    writer.close()
# Training loop: one pass over `loader` per epoch; loss is restricted to the
# training nodes, averaged over `num_train`, and logged to TensorBoard.
# NOTE(review): relies on args/model/loader/opt/labels/train_mask/test_mask/
# num_train/writer being defined earlier, outside this chunk — confirm scope.
for epoch in range(args.num_epochs):
    total_loss = 0
    model.train()
    for batch in loader:
        # print ('batch:', batch.feat)
        opt.zero_grad()
        pred = model(batch)
        # Keep only predictions for the training nodes.
        pred = pred[train_mask]
        # print ('pred:', pred)
        label = labels[train_mask]
        # print ('label:', label)
        loss = model.loss(pred, label)
        print('loss:', loss)
        loss.backward()
        opt.step()
        total_loss += loss.item() * 1
    # Average summed batch losses over the number of training samples.
    total_loss /= num_train
    writer.add_scalar("loss", total_loss, epoch)
    # Evaluate on the test split every 10 epochs.
    if epoch % 10 == 0:
        test_acc = test(loader, model, args, labels, test_mask)
        print("Epoch {}. Loss: {:.4f}. Test accuracy: {:.4f}".format(
            epoch, total_loss, test_acc))
        writer.add_scalar("test accuracy", test_acc, epoch)

# Script entry: parse CLI args and run the syn1 task, logging under logdir.
prog_args = arg_parse()
path = os.path.join(prog_args.logdir, io_utils.gen_prefix(prog_args))
syn_task1(prog_args, writer=SummaryWriter(path))
def get_ground_truth_syn4(node):
    """Return the 6 node ids of the syn4 motif containing `node`.

    Motifs start at node 1 and occupy consecutive blocks of 6 ids, so the
    containing block is found by rounding (node - 1) down to a multiple of 6.
    (Removed a dead `ground_truth = []` assignment that was immediately
    overwritten.)
    """
    buff = node - 1
    offset = buff % 6
    return [buff - offset + val + 1 for val in range(6)]


def get_ground_truth_syn5(node):
    """Return the 9 node ids of the syn5 motif containing `node`.

    Motifs start at node 7 and occupy consecutive blocks of 9 ids.
    """
    buff = node - 7
    offset = buff % 9
    return [buff - offset + val + 7 for val in range(9)]


# Guarded so importing this module (e.g. for the helpers above) does not
# trigger argument parsing and file I/O.
if __name__ == "__main__":
    # Get explanations saved by a previous run.
    prog_args = configs.arg_parse()
    savename = utils.gen_filesave(prog_args)
    # NOTE: np.load with allow_pickle unpickles arbitrary objects — only use
    # on files this project produced itself.
    explanations = np.load(savename, allow_pickle='TRUE').item()

    if prog_args.dataset is not None:
        # Both bitcoin variants share one evaluation routine.
        if prog_args.dataset in ("bitcoinalpha", "bitcoinotc"):
            evaluate_bitcoin_explanation(explanations, prog_args)
        else:
            evaluate_syn_explanation(explanations, prog_args)