import pytest
import torch

from torch_geometric.nn import GAT, GCN, Explainer, to_captum

try:
    from captum import attr  # noqa
    with_captum = True
except ImportError:
    with_captum = False

x = torch.randn(8, 3, requires_grad=True)
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7],
                           [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6]])

GCN = GCN(3, 16, 2, 7, dropout=0.5)
GAT = GAT(3, 16, 2, 7, heads=2, concat=False)

mask_types = ['edge', 'node_and_edge', 'node']
methods = [
    'Saliency',
    'InputXGradient',
    'Deconvolution',
    'FeatureAblation',
    'ShapleyValueSampling',
    'IntegratedGradients',
    'GradientShap',
    'Occlusion',
    'GuidedBackprop',
    'KernelShap',
    'Lime',
]
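# -- Hedged usage sketch (not part of the original test file) -----------------
# The setup above suggests that `to_captum` wraps a PyG model so that Captum
# treats the mask as the explained input and (x, edge_index) as additional
# forward arguments. Under that assumption, an edge-mask attribution for a
# single output node could look roughly like this; `output_idx` and the mask
# shape are illustrative assumptions, not confirmed by the file above.
if with_captum:
    from captum.attr import IntegratedGradients

    output_idx = 0  # hypothetical node whose prediction is explained
    captum_model = to_captum(GCN, mask_type='edge', output_idx=output_idx)
    edge_mask = torch.ones(edge_index.size(1), requires_grad=True)

    ig = IntegratedGradients(captum_model)
    # `target` selects the class to attribute; `additional_forward_args` are
    # passed through to the wrapped GNN unchanged.
    attributions = ig.attribute(edge_mask.unsqueeze(0), target=0,
                                additional_forward_args=(x, edge_index),
                                internal_batch_size=1)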
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split

import torch_geometric.transforms as T
from torch_geometric.datasets import BAShapes
from torch_geometric.nn import GCN, GNNExplainer
from torch_geometric.utils import k_hop_subgraph

dataset = BAShapes(transform=T.GCNNorm())
data = dataset[0]

idx = torch.arange(data.num_nodes)
train_idx, test_idx = train_test_split(idx, train_size=0.8, stratify=data.y)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = data.to(device)
model = GCN(data.num_node_features, hidden_channels=20, num_layers=3,
            out_channels=dataset.num_classes, normalize=False).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.005)


def train():
    model.train()
    optimizer.zero_grad()
    out = model(data.x, data.edge_index, data.edge_weight)
    loss = F.cross_entropy(out[train_idx], data.y[train_idx])
    loss.backward()
    # Clip gradients only after backward() has populated them.
    torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0)
    optimizer.step()
    return float(loss)
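# -- Hedged usage sketch (not part of the original example) -------------------
# After training, GNNExplainer can be asked for a node-level explanation, and
# `k_hop_subgraph` (imported above) can restrict the result to the node's
# computational neighborhood. The keyword arguments below (epochs, log,
# edge_weight forwarding) are assumptions about the GNNExplainer API for this
# PyG version and may need adjusting.
def explain(node_idx):
    explainer = GNNExplainer(model, epochs=300, log=False)
    node_feat_mask, edge_mask = explainer.explain_node(
        node_idx, data.x, data.edge_index, edge_weight=data.edge_weight)
    # The model has 3 message-passing layers, so 3 hops cover its receptive
    # field around `node_idx`.
    subset, sub_edge_index, mapping, hard_edge_mask = k_hop_subgraph(
        node_idx, num_hops=3, edge_index=data.edge_index)
    return node_feat_mask, edge_mask[hard_edge_mask]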
import argparse
import os.path as osp

import torch
import torch.nn.functional as F

import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import GCN, MLP

parser = argparse.ArgumentParser()
parser.add_argument('--lamb', type=float, default=0.0,
                    help='Balances loss from hard labels and teacher outputs')
args = parser.parse_args()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid')
dataset = Planetoid(path, name='Cora', transform=T.NormalizeFeatures())
data = dataset[0].to(device)

gnn = GCN(dataset.num_node_features, hidden_channels=16,
          out_channels=dataset.num_classes, num_layers=2).to(device)
mlp = MLP([dataset.num_node_features, 64, dataset.num_classes], dropout=0.5,
          batch_norm=False).to(device)

gnn_optimizer = torch.optim.Adam(gnn.parameters(), lr=0.01, weight_decay=5e-4)
mlp_optimizer = torch.optim.Adam(mlp.parameters(), lr=0.01, weight_decay=5e-4)


def train_teacher():
    gnn.train()
    gnn_optimizer.zero_grad()
    out = gnn(data.x, data.edge_index)
    loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    gnn_optimizer.step()
    return float(loss)
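# -- Hedged sketch of the distillation step (not part of the original file) ---
# `--lamb` suggests the student MLP is trained on a convex combination of the
# hard cross-entropy loss and a KL term against the frozen teacher's soft
# predictions. The function below is an illustrative sketch under that
# assumption; `train_student` and `y_soft` are names introduced here, not
# taken from the original.
def train_student():
    with torch.no_grad():  # teacher provides fixed soft targets
        y_soft = gnn(data.x, data.edge_index).log_softmax(dim=-1)

    mlp.train()
    mlp_optimizer.zero_grad()
    out = mlp(data.x)
    loss_hard = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
    loss_soft = F.kl_div(out.log_softmax(dim=-1), y_soft,
                         reduction='batchmean', log_target=True)
    loss = args.lamb * loss_hard + (1 - args.lamb) * loss_soft
    loss.backward()
    mlp_optimizer.step()
    return float(loss)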