import torch
from torch_geometric.nn import GCNConv


class GCN(torch.nn.Module):
    """Two-layer Graph Convolutional Network (Kipf & Welling).

    Maps node features of size ``in_channels`` to ``out_channels`` logits
    by stacking two GCNConv layers with a ReLU in between.
    """

    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()  # modern zero-arg super(); same behavior as super(GCN, self)
        self.conv1 = GCNConv(in_channels, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, out_channels)

    def forward(self, x, edge_index):
        """Run both graph convolutions.

        Args:
            x: node feature matrix — presumably shape (num_nodes, in_channels);
               TODO confirm against caller.
            edge_index: graph connectivity in COO format (2, num_edges).

        Returns:
            Node-level output of shape (num_nodes, out_channels); no final
            activation, so these are raw scores/logits.
        """
        x = self.conv1(x, edge_index).relu()
        x = self.conv2(x, edge_index)
        return x
import torch
from torch_geometric.nn import GCNConv, BatchNorm


class GCN(torch.nn.Module):
    """Two-layer GCN with batch normalization and dropout regularization.

    Each GCNConv is followed by a BatchNorm over the node features; dropout
    (p=0.5) is applied after the first (activated) layer.
    """

    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()  # modern zero-arg super(); same behavior as super(GCN, self)
        self.conv1 = GCNConv(in_channels, hidden_channels)
        self.bn1 = BatchNorm(hidden_channels)
        self.conv2 = GCNConv(hidden_channels, out_channels)
        # NOTE(review): normalizing the final layer's output is unusual if
        # these are meant to be logits — confirm this is intended.
        self.bn2 = BatchNorm(out_channels)
        self.dropout = torch.nn.Dropout(p=0.5)

    def forward(self, x, edge_index):
        """Run conv → batch-norm → ReLU → dropout → conv → batch-norm.

        Args:
            x: node feature matrix — presumably shape (num_nodes, in_channels);
               TODO confirm against caller.
            edge_index: graph connectivity in COO format (2, num_edges).

        Returns:
            Batch-normalized node-level output of shape
            (num_nodes, out_channels).
        """
        x = self.bn1(self.conv1(x, edge_index)).relu()
        x = self.dropout(x)  # active only in training mode
        x = self.bn2(self.conv2(x, edge_index))
        return x


# In this example, we add batch normalization and dropout regularization to
# the GCN model. We use the BatchNorm module after each GCNConv layer to
# normalize the output features. We also use the Dropout module to randomly
# drop out a fraction of the outputs.
#
# Overall, the torch_geometric.nn GCNConv module is a useful tool for building
# deep learning models for graph datasets. It allows for efficient propagation
# of information through the graph structure and can be customized with various
# activation functions, regularization techniques, and architectural designs.