def test_srgcn_cora():
    """Smoke-test SRGCN on Cora across normalization, activation and attention variants.

    Runs one short training per configuration and checks the reported test
    accuracy stays in [0, 1].
    """
    args = get_default_args_for_nc("cora", "srgcn")
    args.num_heads = 4
    args.subheads = 1
    args.nhop = 1
    args.node_dropout = 0.5
    args.alpha = 0.2
    args.normalization = "identity"
    args.attention_type = "identity"
    args.activation = "linear"
    norm_list = ["identity", "row_uniform", "row_softmax", "col_uniform", "symmetry"]
    activation_list = ["relu", "relu6", "sigmoid", "tanh", "leaky_relu", "softplus", "elu", "linear"]
    attn_list = ["node", "edge", "identity", "heat", "ppr", "gaussian"]

    for norm in norm_list:
        args.normalization = norm
        ret = train(args)
        assert 0 <= ret["test_acc"] <= 1
    # BUG FIX: the original reset `args.norm`, but the loop above mutates
    # `args.normalization` — so the activation/attention sweeps ran with the
    # last norm ("symmetry") instead of the intended baseline.
    args.normalization = "identity"

    for ac in activation_list:
        args.activation = ac
        ret = train(args)
        assert 0 <= ret["test_acc"] <= 1
    args.activation = "relu"

    for attn in attn_list:
        args.attention_type = attn
        ret = train(args)
        assert 0 <= ret["test_acc"] <= 1
def test_gcc_cora():
    """Smoke-test GCC on Cora with a tiny one-epoch configuration."""
    args = get_default_args_for_nc("cora", "gcc", mw="gcc_mw", dw="gcc_dw")
    # Keep everything minimal so the test runs quickly.
    args.epochs = 1
    args.num_workers = 0
    args.batch_size = 16
    args.rw_hops = 8
    args.subgraph_size = 16
    args.positional_embedding_size = 16
    args.nce_k = 4
    train(args)
def test_gcn_cora():
    """Smoke-test GCN on Cora, then with each norm layer and with residuals."""
    args = get_default_args_for_nc("cora", "gcn")
    args.num_layers = 2

    result = train(args)
    assert 0 <= result["test_acc"] <= 1

    for norm_name in ("batchnorm", "layernorm"):
        args.norm = norm_name
        result = train(args)
        assert 0 <= result["test_acc"] <= 1

    args.residual = True
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def __call__(self, edge_index, x=None, edge_weight=None):
    """Compute node embeddings for the given graph.

    For "emb" methods the model is built and applied directly; for "gnn"
    methods the graph is saved to disk, wrapped in a NodeDataset, trained,
    and then embedded. Returns a numpy array of embeddings.
    """
    if self.method_type == "emb":
        if isinstance(edge_index, np.ndarray):
            edge_index = torch.from_numpy(edge_index)
        # Split the (E, 2) edge array into a (src, dst) pair.
        edge_index = (edge_index[:, 0], edge_index[:, 1])
        graph = Graph(edge_index=edge_index, edge_weight=edge_weight)
        self.model = build_model(self.args)
        embeddings = self.model(graph)
    elif self.method_type == "gnn":
        num_nodes = edge_index.max().item() + 1
        if x is None:
            print("No input node features, use random features instead.")
            x = np.random.randn(num_nodes, self.num_features)
        if isinstance(x, np.ndarray):
            x = torch.from_numpy(x).float()
        if isinstance(edge_index, np.ndarray):
            edge_index = torch.from_numpy(edge_index)
        edge_index = (edge_index[:, 0], edge_index[:, 1])
        graph = Graph(x=x, edge_index=edge_index, edge_weight=edge_weight)
        # Persist the graph so NodeDataset can load it for training.
        torch.save(graph, self.data_path)
        dataset = NodeDataset(path=self.data_path, scale_feat=False, metric="accuracy")
        self.args.dataset = dataset
        model = train(self.args)
        embeddings = model.embed(graph.to(model.device))
        embeddings = embeddings.detach().cpu().numpy()
    return embeddings
def test_dgi():
    """Smoke-test DGI unsupervised training on Cora for two epochs."""
    args = get_default_args_for_unsup_nn("cora", "dgi", mw="dgi_mw")
    args.activation = "relu"
    args.sparse = True
    args.epochs = 2
    result = train(args)
    assert result["test_acc"] > 0
def test_rgcn_wn18():
    """Smoke-test RGCN link prediction on WN18; MRR must lie in [0, 1]."""
    args = get_default_args_kg(dataset="wn18", model="rgcn")
    args.self_dropout = 0.2
    args.self_loop = True
    args.regularizer = "basis"
    result = train(args)
    assert 0 <= result["mrr"] <= 1
def test_attribute_mask():
    """Smoke-test GCN with the attribute-mask auxiliary task on Cora."""
    args = get_default_args_generative("cora", "gcn", auxiliary_task="attribute_mask")
    args.alpha = 1
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def test_prone_blogcatalog():
    """Smoke-test ProNE embeddings on BlogCatalog."""
    args = get_default_args_ne(dataset="blogcatalog", model="prone")
    args.step = 5
    args.theta = 0.5
    args.mu = 0.2
    result = train(args)
    assert result["Micro-F1 0.1"] > 0
def test_distance_to_clusters():
    """Smoke-test GCN with the distance-to-clusters auxiliary task on Cora."""
    args = get_default_args_generative("cora", "gcn", auxiliary_task="distance2clusters")
    args.alpha = 3
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def test_prone_amazon():
    """Smoke-test ProNE on the multiplex Amazon dataset; ROC-AUC in [0, 1]."""
    args = get_default_args_multiplex(dataset="amazon", model="prone")
    args.step = 5
    args.theta = 0.5
    args.mu = 0.2
    result = train(args)
    # Chained comparison replaces the original `and` of two inequalities.
    assert 0 <= result["ROC_AUC"] <= 1
def test_prone_usa_airport():
    """Smoke-test ProNE embeddings on the USA-airport dataset."""
    args = get_default_args_ne(dataset="usa-airport", model="prone")
    args.step = 5
    args.theta = 0.5
    args.mu = 0.2
    result = train(args)
    assert result["Micro-F1 0.1"] > 0
def test_pairwise_attr_sim():
    """Smoke-test GCN with the pairwise-attribute-similarity auxiliary task."""
    args = get_default_args_generative("cora", "gcn", auxiliary_task="pairwise_attr_sim")
    args.alpha = 100
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def test_pairwise_distance():
    """Smoke-test GCN with the pairwise-distance auxiliary task on Cora."""
    args = get_default_args_generative("cora", "gcn", auxiliary_task="pairwise_distance")
    args.alpha = 35
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def test_sage_cora():
    """Smoke-test GraphSAGE on Cora with mean aggregation and layernorm."""
    args = get_default_args_for_nc("cora", "sage")
    args.aggr = "mean"
    args.normalize = True
    args.norm = "layernorm"
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def test_netmf_ppi():
    """Smoke-test NetMF embeddings on the PPI network-embedding dataset."""
    args = get_default_args_ne(dataset="ppi-ne", model="netmf")
    args.window_size = 2
    args.rank = 32
    args.negative = 3
    args.is_large = False
    result = train(args)
    assert result["Micro-F1 0.1"] > 0
def test_disengcn_cora():
    """Smoke-test DisenGCN on Cora with a small two-layer channel config."""
    args = get_default_args_for_nc("cora", "disengcn")
    args.K = [4, 2]
    args.activation = "leaky_relu"
    args.tau = 1.0
    args.iterations = 3
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def test_clustergcn_pubmed():
    """Smoke-test Cluster-GCN-style training of GCN on PubMed (CPU only)."""
    args = get_default_args_for_nc("pubmed", "gcn", dw="cluster_dw")
    args.cpu = True
    args.batch_size = 3
    args.n_cluster = 20
    args.eval_step = 1
    result = train(args)
    assert 0 <= result["test_acc"] <= 1
def test_patchy_san_mutag():
    """Smoke-test PATCHY-SAN graph classification on MUTAG."""
    args = get_default_args_graph_clf(dataset="mutag", model="patchy_san", dw="patchy_san_dw")
    args = add_patchy_san_args(args)
    args.batch_size = 5
    result = train(args)
    assert result["test_acc"] > 0
def test_diffpool_imdb_binary():
    """Smoke-test DiffPool graph classification on IMDB-Binary."""
    args = get_default_args_graph_clf(dataset="imdb-b", model="diffpool")
    args = add_diffpool_args(args)
    args.batch_size = 100
    args.train_ratio = 0.6
    args.test_ratio = 0.2
    result = train(args)
    assert result["test_acc"] > 0
def test_compgcn_wn18rr():
    """Smoke-test CompGCN on WN18RR with DistMult scoring; MRR in [0, 1]."""
    args = get_default_args_kg(dataset="wn18rr", model="compgcn")
    args.lbl_smooth = 0.1
    args.score_func = "distmult"
    args.regularizer = "basis"
    args.opn = "sub"
    result = train(args)
    assert 0 <= result["mrr"] <= 1
def test_netsmf_ppi():
    """Smoke-test NetSMF embeddings on the PPI network-embedding dataset."""
    args = get_default_args_ne(dataset="ppi-ne", model="netsmf")
    args.window_size = 3
    args.negative = 1
    args.num_round = 2
    args.worker = 5
    result = train(args)
    assert result["Micro-F1 0.1"] > 0
def test_gin_mutag():
    """Smoke-test GIN on MUTAG both with and without k-fold evaluation."""
    args = get_default_args_graph_clf(dataset="mutag", model="gin")
    args = add_gin_args(args)
    args.batch_size = 20
    for use_kfold in (True, False):
        args.kfold = use_kfold
        args.seed = 0  # fixed seed so both runs are deterministic
        result = train(args)
        assert result["test_acc"] > 0
def test_spectral_cora():
    """Smoke-test attributed graph clustering with spectral clustering on Cora."""
    args = get_default_args_agc(dataset="cora", model="prone", mw="agc_mw", dw="node_classification_dw")
    args.model_type = "content"
    args.cluster_method = "spectral"
    result = train(args)
    assert result["nmi"] >= 0
def test_mvgrl():
    """Smoke-test MVGRL unsupervised training on Cora with a tiny config."""
    args = get_default_args_for_unsup_nn("cora", "mvgrl", mw="mvgrl_mw")
    args.epochs = 2
    args.sparse = False
    args.sample_size = 200
    args.batch_size = 4
    args.alpha = 0.2
    result = train(args)
    assert result["test_acc"] > 0
def test_prone_ppi():
    """Smoke-test ProNE with the prone++ enhancement on the PPI dataset."""
    args = get_default_args_ne(dataset="ppi-ne", model="prone")
    args.enhance = "prone++"
    args.max_evals = 3
    args.step = 5
    args.theta = 0.5
    args.mu = 0.2
    result = train(args)
    assert result["Micro-F1 0.1"] > 0
def test_daegc_cora():
    """Smoke-test DAEGC attributed graph clustering with k-means on Cora."""
    args = get_default_args_agc(dataset="cora", model="daegc", mw="daegc_mw", dw="node_classification_dw")
    args.model_type = "both"
    args.cluster_method = "kmeans"
    result = train(args)
    assert result["nmi"] >= 0
def test_deepwalk_wikipedia():
    """Smoke-test DeepWalk embeddings on Wikipedia with short walks."""
    args = get_default_args_ne(dataset="wikipedia", model="deepwalk")
    args.walk_length = 5
    args.walk_num = 1
    args.window_size = 3
    args.worker = 5
    args.iteration = 1
    result = train(args)
    assert result["Micro-F1 0.1"] > 0
def test_appnp_cora():
    """Smoke-test PPNP with APPNP propagation on Cora.

    Uses inclusive bounds on the accuracy check: 0.0 and 1.0 are legal
    accuracy values, and every sibling test in this file asserts
    ``0 <= acc <= 1`` — the original strict ``0 < acc < 1`` could fail
    spuriously on a perfect or degenerate run.
    """
    args = get_default_args_for_nc("cora", "ppnp")
    args.num_layers = 2
    args.propagation_type = "appnp"
    args.alpha = 0.1
    args.num_iterations = 10
    ret = train(args)
    assert 0 <= ret["test_acc"] <= 1
def test_train():
    """Smoke-test the generic train() entry point with GCN on Cora (CPU)."""
    args = get_default_args(dataset="cora", model="gcn", epochs=10, cpu=True)
    # These defaults come back as single-element lists; unwrap them.
    for attr in ("dataset", "model", "seed"):
        setattr(args, attr, getattr(args, attr)[0])
    result = train(args)
    assert "test_acc" in result
    assert result["test_acc"] > 0
def test_c_s_cora():
    """Smoke-test Correct & Smooth over an MLP on Cora, with and without autoscale."""
    args = get_default_args_for_nc("cora", "correct_smooth_mlp")
    args.use_embeddings = True
    args.correct_alpha = 0.5
    args.smooth_alpha = 0.5
    args.num_correct_prop = 2
    args.num_smooth_prop = 2
    args.correct_norm = "sym"
    args.smooth_norm = "sym"
    args.scale = 1.0
    for autoscale in (True, False):
        args.autoscale = autoscale
        result = train(args)
        assert 0 <= result["test_acc"] <= 1