def __init__(self,
             n_features,
             n_classes,
             n_hidden_GNN=[10],
             n_hidden_FC=[],
             dropout_GNN=0,
             dropout_FC=0):
    """Stack GIN convolution layers on top of the shared base network.

    NOTE(review): the mutable defaults ([10], []) are shared across calls —
    safe only if never mutated; confirm the base class does not modify them.
    NOTE(review): the super() call passes dropout_FC before dropout_GNN,
    the reverse of this signature's order — presumably the parent signature
    orders them that way; verify against the base class.
    """
    super(GINConv, self).__init__(n_features, n_classes, n_hidden_GNN,
                                  n_hidden_FC, dropout_FC, dropout_GNN)
    # First GIN layer lifts the single input feature channel to the first
    # hidden width; eps is fixed at 0.2 (later layers use the default eps).
    self.layers_GNN.append(
        pyg_nn.GINConv(
            nn.Sequential(
                nn.Linear(1, n_hidden_GNN[0]),
                nn.ReLU(),
                nn.Linear(n_hidden_GNN[0], n_hidden_GNN[0])),
            eps=0.2))
    # Remaining layers chain consecutive hidden widths.  The range is empty
    # when there is a single GNN layer, so no explicit guard is needed.
    for i in range(self.n_layers_GNN - 1):
        self.layers_GNN.append(
            pyg_nn.GINConv(
                nn.Sequential(
                    nn.Linear(n_hidden_GNN[i], n_hidden_GNN[i + 1]),
                    nn.ReLU(),
                    nn.Linear(n_hidden_GNN[i + 1], n_hidden_GNN[i + 1]))))
def __init__(self, feat_in, hidden_features=10, output_dim=1, dropout=0.1):
    """Graph node classifier made of graph convolution layers.

    :param feat_in: number of features per node
    :param hidden_features: feature width between layers
    :param output_dim: width of the final SAGE layer's output
    :param dropout: dropout value (applied after each layer)
    """
    super(TreeSupport, self).__init__()
    self.dropout = dropout
    # Narrowed width used from the second conv onward (never below 1).
    half = max(hidden_features // 2, 1)

    def gin(d_in, d_out):
        # GIN convolution with a Linear -> LeakyReLU -> Linear node MLP
        # and a learnable epsilon.
        return gnn.GINConv(
            nn.Sequential(nn.Linear(d_in, d_out),
                          nn.LeakyReLU(),
                          nn.Linear(d_out, d_out)),
            train_eps=True,
        )

    self.conv1 = gin(feat_in, hidden_features)
    self.conv2 = gin(hidden_features, half)
    self.conv3 = gin(half, half)
    self.conv4 = gnn.SAGEConv(half, half)
    self.conv5 = gnn.SAGEConv(half, output_dim)
def fun():
    """Build a GeneralPurposeNet of five GINConv layers (4->6->6->6->6->1)
    interleaved with LeakyReLU activations.

    NOTE(review): `detach` is a free variable here — presumably a
    module-level flag; confirm it is defined in this module's scope.
    """
    dims = [(4, 6), (6, 6), (6, 6), (6, 6), (6, 1)]
    layers = []
    for idx, (d_in, d_out) in enumerate(dims):
        layers.append(gnn.GINConv(nn.Linear(d_in, d_out),
                                  eps=1e-6, train_eps=True))
        # Activation between every pair of convolutions, not after the last.
        if idx < len(dims) - 1:
            layers.append(Graph(nn.LeakyReLU)(inplace=True))
    return GeneralPurposeNet(GraphSequential(*layers), detach=detach)
def build_conv_model(self, input_dim, hidden_dim):
    """Return the message-passing layer for the configured task.

    Node-level tasks use a plain GCN convolution; any other task gets a
    GIN convolution with a Linear -> ReLU -> Linear node MLP.  (Refer to
    the pytorch-geometric nn module for other GNN implementations.)
    """
    if self.task != 'node':
        mlp = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        return pyg_nn.GINConv(mlp)
    return pyg_nn.GCNConv(input_dim, hidden_dim)
def __init__(self, input_dim, hidden_dim, output_dim, args):
    """GIN network: a linear pre-message-passing stage, args.num_layers
    GINConv layers, and a two-layer post-message-passing head.

    :param input_dim: number of input node features
    :param hidden_dim: hidden width used by every intermediate layer
    :param output_dim: width of the final linear layer's output
    :param args: options object providing num_layers
    """
    super(GIN, self).__init__()
    self.num_layers = args.num_layers
    self.pre_mp = nn.Sequential(nn.Linear(input_dim, hidden_dim))

    def hidden_mlp():
        # Node-update MLP shared in shape by every GIN layer.
        return Sequential(Linear(hidden_dim, hidden_dim), ReLU(),
                          Linear(hidden_dim, hidden_dim))

    self.convs = nn.ModuleList()
    self.nn1 = hidden_mlp()
    self.convs.append(pyg_nn.GINConv(self.nn1))
    for _ in range(args.num_layers - 1):
        # NOTE: self.nnk is rebound each iteration; only the last MLP stays
        # reachable through the attribute (all of them live inside convs).
        self.nnk = hidden_mlp()
        self.convs.append(pyg_nn.GINConv(self.nnk))
    self.post_mp = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), ReLU(),
                                 nn.Linear(hidden_dim, output_dim))
def build_conv_model(self, input_dim, hidden_dim):
    """Build the convolution layer selected by self.conv_func.

    :param input_dim: input feature dimension
    :param hidden_dim: hidden/output feature dimension
    :returns: a GINConv when self.conv_func is falsy (the default), or a
        GATConv when it is 'GATConv'
    :raises ValueError: for any other conv_func value.  The original code
        silently fell off the end and returned None here, which surfaced
        later as a confusing AttributeError at call time.
    """
    if not self.conv_func:
        # Default: GIN with a Linear -> ReLU -> Linear node MLP and a
        # (configurably) learnable epsilon.
        return pyg_nn.GINConv(nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)), train_eps=self.train_eps)
    elif self.conv_func == 'GATConv':
        return pyg_nn.GATConv(input_dim, hidden_dim)
    raise ValueError(f"Unsupported conv_func: {self.conv_func!r}")
def build_conv_layer(input_dim: int, hidden_dim: int, task: str = 'graph') -> pyg_nn.MessagePassing:
    """Return the convolution layer appropriate for *task*.

    Graph-level tasks get a GINConv whose node MLP is
    Linear -> ReLU -> Linear; every other task gets a GCNConv.
    """
    if task != 'graph':
        return pyg_nn.GCNConv(input_dim, hidden_dim)
    mlp = nn.Sequential(
        nn.Linear(input_dim, hidden_dim),
        nn.ReLU(),
        nn.Linear(hidden_dim, hidden_dim),
    )
    return pyg_nn.GINConv(mlp)
def build_conv_model(self, input_dim, output_dim):
    """Build the convolution layer selected by self.args.method.

    :param input_dim: input feature dimension
    :param output_dim: output feature dimension
    :returns: MyConv for 'base', GCNConv for 'gcn', GINConv for 'gin'
    :raises ValueError: for an unrecognised method.  Previously an unknown
        method left conv_model unbound and the return line raised an
        opaque UnboundLocalError.
    """
    args = self.args
    if args.method == 'base':
        # sage with add agg
        conv_model = MyConv(input_dim, output_dim)
    elif args.method == 'gcn':
        conv_model = pyg_nn.GCNConv(input_dim, output_dim)
    elif args.method == 'gin':
        conv_model = pyg_nn.GINConv(
            nn.Sequential(nn.Linear(input_dim, output_dim),
                          nn.ReLU(),
                          nn.Linear(output_dim, output_dim)))
    else:
        raise ValueError(f"Unknown method: {args.method!r}")
    return conv_model
def build_conv_model(self, model_type, input_dim, hidden_dim):
    """Return the embedding layer for *model_type*.

    'GCN', 'GraphSage' and 'GAT' select the corresponding simple node
    convolution; any other value falls back to a GINConv (used for whole
    graph embedding).

    The original wrapped the whole body in a vacuous ``if True:`` block;
    that dead scaffolding is removed — behavior is unchanged.
    """
    # use a simple GCN for node embedding
    if model_type == 'GCN':
        return pyg_nn.GCNConv(input_dim, hidden_dim)
    elif model_type == 'GraphSage':
        return GraphSage(input_dim, hidden_dim)
    elif model_type == 'GAT':
        return GAT(input_dim, hidden_dim)
    else:
        # for whole graph embedding
        return pyg_nn.GINConv(nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)))
def __init__(self, in_channels, out_channels, n_layers, n_hiddens, train_eps):
    """GIN convolution whose node MLP has n_layers linear layers.

    The MLP maps in_channels.node through n_layers-1 ReLU-separated
    hidden layers of width n_hiddens, ending in out_channels.node.
    NOTE(review): in_channels/out_channels are presumably small config
    objects exposing a .node dimension — confirm against the callers.
    """
    super().__init__()
    self.n_layers = n_layers
    self.n_hiddens = n_hiddens
    self.train_eps = train_eps
    self.out_channels = out_channels
    mlp = []
    width = in_channels.node
    for _ in range(self.n_layers - 1):
        mlp += [nn.Linear(width, self.n_hiddens), nn.ReLU()]
        width = self.n_hiddens
    mlp.append(nn.Linear(width, out_channels.node))
    self.conv = geom_nn.GINConv(nn.Sequential(*mlp), train_eps=self.train_eps)
def __init__(self, num_features, num_classes, hidden_units=32, num_layers=3,
             dropout=0.15, mlp_layers=2, train_eps=False):
    """GIN with a linear classification head per layer.

    linears[0] reads the raw node features; linears[i+1] reads the output
    of conv i.  There are num_layers - 1 conv / batch-norm pairs.
    """
    super(GIN, self).__init__()
    conv_list, bn_list = [], []
    head_list = [nn.Linear(num_features, num_classes)]
    for layer in range(num_layers - 1):
        # The first conv consumes raw features; later ones the hidden width.
        width_in = num_features if layer == 0 else hidden_units
        conv_list.append(
            gnn.GINConv(MLP(width_in, hidden_units, hidden_units, mlp_layers),
                        train_eps=train_eps))
        bn_list.append(nn.BatchNorm1d(hidden_units))
        head_list.append(nn.Linear(hidden_units, num_classes))
    self.convs = nn.ModuleList(conv_list)
    self.bns = nn.ModuleList(bn_list)
    self.linears = nn.ModuleList(head_list)
    self.num_layers = num_layers
    self.dropout = dropout