def __init__(self, input_dim, hidden_dim, output_dim, num_layers, gnn_type):
    """Build a stack of ``num_layers`` graph-conv layers of one type.

    Args:
        input_dim: size of the input node features.
        hidden_dim: width of the hidden representations.
        output_dim: size of the final layer's output features.
        num_layers: total number of conv layers (input + hidden + output).
        gnn_type: one of ``"gcn"``, ``"sage"``, ``"gat"``.

    Raises:
        ValueError: if ``gnn_type`` is not one of the supported types.
    """
    super(GNN, self).__init__()
    self.hidden_dim = hidden_dim
    self.num_layers = num_layers
    self.gnn_type = gnn_type
    self.convs = nn.ModuleList()

    def make_conv(in_dim, out_dim):
        # Single dispatch point for the layer type; replaces the three
        # duplicated if/elif chains of the original implementation.
        if gnn_type == "gcn":
            return GraphConv(in_dim, out_dim)
        elif gnn_type == "sage":
            return SAGEConv(in_dim, out_dim, "gcn")
        elif gnn_type == "gat":
            # NOTE(review): with num_heads=3 DGL's GATConv emits per-head
            # outputs; presumably forward() merges heads — confirm there.
            return GATConv(in_dim, out_dim, num_heads=3)
        raise ValueError("Invalid gnn_type")

    # Input layer — also validates gnn_type up front, as the original
    # first if/elif chain did.
    self.convs.append(make_conv(input_dim, hidden_dim))
    # Hidden layers (num_layers - 2 of them).
    for _ in range(num_layers - 2):
        self.convs.append(make_conv(hidden_dim, hidden_dim))
    # Output layer.
    self.convs.append(make_conv(hidden_dim, output_dim))
def __init__(self, G, hid_dims, num_layers, aggregator_type='gcn',
             feat_drop=0.0, bias=True, norm=None, activation=None,
             multihot=True):
    """Build a GraphSAGE stack: an input layer, ``num_layers`` hidden
    (hid -> hid) layers, and an output layer, all sharing one aggregator.

    Args:
        G: graph, forwarded to the base class.
        hid_dims: hidden layer width, forwarded to the base class.
        num_layers: number of hidden SAGE layers.
        aggregator_type: DGL SAGEConv aggregator name (default ``'gcn'``).
        feat_drop, bias, norm, activation: stored on the instance;
            presumably consumed by the base class or forward() — confirm.
        multihot: forwarded to the base class.
    """
    super().__init__(G, hid_dims, num_layers, multihot)
    self.aggregator_type = aggregator_type
    self.feat_drop = feat_drop
    self.bias = bias
    self.norm = norm
    self.activation = activation
    # self.in_dims / self.hid_dims / self.out_dims come from the base class.
    self.input_layer = SAGEConv(self.in_dims, self.hid_dims, aggregator_type)
    # FIX: use nn.ModuleList instead of a plain Python list so the hidden
    # layers' parameters are registered with the module (otherwise the
    # optimizer never sees them and .to(device)/state_dict() skip them).
    self.hidden_layers = nn.ModuleList(
        SAGEConv(self.hid_dims, self.hid_dims, aggregator_type)
        for _ in range(num_layers)
    )
    self.output_layer = SAGEConv(self.hid_dims, self.out_dims, aggregator_type)
def __init__(self, num_features, num_classes, dim=10):
    """Two bias-free mean-aggregating SAGE layers plus a linear readout.

    NOTE(review): ``num_classes`` is accepted but unused — the head emits a
    single logit (fc1 -> 1); presumably binary classification — confirm.
    """
    super(NetGraphSage, self).__init__()
    # Single-logit readout head (no bias anywhere in the network).
    self.fc1 = Linear(dim, 1, bias=False)
    # Feature extractor: num_features -> dim -> dim.
    self.conv1 = SAGEConv(num_features, dim, aggregator_type="mean", bias=False)
    self.conv2 = SAGEConv(dim, dim, aggregator_type="mean", bias=False)
def __init__(self, in_feats, hid_feats, out_feats):
    """Two-layer mean-aggregating GraphSAGE: in_feats -> hid_feats -> out_feats."""
    super().__init__()
    layer_shapes = ((in_feats, hid_feats), (hid_feats, out_feats))
    self.conv1, self.conv2 = (
        SAGEConv(in_feats=src, out_feats=dst, aggregator_type="mean")
        for src, dst in layer_shapes
    )
def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation, dropout):
    """Stack of ``n_layers`` mean-aggregating SAGE layers with dropout.

    Layout: in_feats -> n_hidden (x n_layers-1 transitions) -> n_classes.
    ``activation`` is stored as-is; presumably applied in forward() — confirm.
    """
    super().__init__()
    self.n_layers = n_layers
    self.n_hidden = n_hidden
    self.n_classes = n_classes
    # Assemble the conv layers in a plain list first, then wrap once.
    convs = [SAGEConv(in_feats, n_hidden, 'mean')]
    convs += [SAGEConv(n_hidden, n_hidden, 'mean') for _ in range(n_layers - 2)]
    convs.append(SAGEConv(n_hidden, n_classes, 'mean'))
    self.layers = nn.ModuleList(convs)
    self.dropout = nn.Dropout(dropout)
    self.activation = activation
def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation, dropout, aggregator_type):
    """GraphSAGE with one input conv, ``n_layers - 1`` hidden convs, and
    one output conv (n_layers + 1 layers total), all sharing an aggregator.

    ``activation`` is stored; presumably applied between layers in
    forward() — confirm there.
    """
    super(GraphSAGE, self).__init__()
    self.dropout = nn.Dropout(dropout)
    self.activation = activation
    self.layers = nn.ModuleList()
    # input layer
    self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type))
    # hidden layers
    self.layers.extend(
        SAGEConv(n_hidden, n_hidden, aggregator_type)
        for _ in range(n_layers - 1)
    )
    # output layer (no activation on the final conv)
    self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type))
def __init__(self, in_feats, h_feats, num_classes, pooling):
    """GraphSAGE ('pool' aggregator) stack with a linear head and a
    global graph-pooling readout.

    Args:
        in_feats: input node-feature size.
        h_feats: non-empty list of hidden layer widths.
        num_classes: output size of the final linear layer.
        pooling: "AvgPooling", "MaxPooling", or "SumPooling".

    Raises:
        AssertionError: if ``h_feats`` is not a non-empty list.
        NotImplementedError: on an unrecognized ``pooling`` name.
    """
    super(GCN_SAGEConv, self).__init__()
    assert isinstance(h_feats, list), "h_feats must be a list"
    assert len(
        h_feats) != 0, "h_feats is empty. unable to add hidden layers"
    # FIX: nn.ModuleList (was a plain Python list) so the conv layers'
    # parameters are registered with the module — a plain list leaves
    # them invisible to the optimizer, .to(device), and state_dict().
    self.list_of_layers = nn.ModuleList()
    dim = [in_feats] + h_feats
    # Convolution (Hidden) Layers
    for i in range(1, len(dim)):
        self.list_of_layers.append(
            SAGEConv(dim[i - 1], dim[i], aggregator_type='pool'))
    # Final Layer
    self.final = nn.Linear(dim[-1], num_classes)
    # Pooling layer: global readout selected by name.
    if pooling == "AvgPooling":
        self.pooling_layer = dgl.nn.AvgPooling()
    elif pooling == "MaxPooling":
        self.pooling_layer = dgl.nn.MaxPooling()
    elif pooling == "SumPooling":
        self.pooling_layer = dgl.nn.SumPooling()
    else:
        raise NotImplementedError
def __init__(self, n_hops: int, input_dim: int, hidden_dim: int, output_dim: int):
    """Build ``n_hops`` mean-aggregating SAGE layers.

    The first layer consumes ``input_dim``, the last emits ``output_dim``,
    and everything in between is ``hidden_dim`` -> ``hidden_dim``.
    """
    super().__init__()
    self._n_hops = n_hops
    self._activate = torch.nn.ReLU()
    self._sage_layers = torch.nn.ModuleList()
    for layer_idx in range(n_hops):
        is_first = layer_idx == 0
        is_last = layer_idx == n_hops - 1
        self._sage_layers.append(SAGEConv(
            in_feats=input_dim if is_first else hidden_dim,
            out_feats=output_dim if is_last else hidden_dim,
            aggregator_type="mean"))
def __init__(self, in_feats, h_feats):
    """Two mean-aggregating SAGE convolutions sharing hidden width ``h_feats``."""
    super(Model, self).__init__()
    self.h_feats = h_feats

    def _mean_conv(d_in, d_out):
        # Local factory: both layers use the same aggregator.
        return SAGEConv(d_in, d_out, aggregator_type='mean')

    self.conv1 = _mean_conv(in_feats, h_feats)
    self.conv2 = _mean_conv(h_feats, h_feats)
def __init__(self, in_feats, h_feats):
    """Two-layer mean-aggregating GraphSAGE encoder: in_feats -> h_feats -> h_feats."""
    super(GraphSAGE, self).__init__()
    self.conv1, self.conv2 = (
        SAGEConv(src, dst, 'mean')
        for src, dst in ((in_feats, h_feats), (h_feats, h_feats))
    )
def __init__(self, in_features, hidden_size, num_classes):
    """Two max-aggregating SAGE layers: features -> hidden -> class logits."""
    super(Net, self).__init__()
    # Both layers share the same aggregator; hoist the name once.
    aggregator = 'max'
    self.layer1 = SAGEConv(in_features, hidden_size, aggregator)
    self.layer2 = SAGEConv(hidden_size, num_classes, aggregator)
def __init__(self, nfeat, nhid, nclass=2, dropout=False):
    """Two-layer SAGE classifier: mean aggregation first, LSTM second.

    NOTE(review): ``dropout`` is accepted but never stored or applied
    here — confirm whether forward() was meant to use it.
    """
    super(SAGE, self).__init__()
    # First hop aggregates neighbours by mean; second hop uses an LSTM
    # aggregator. Both layers keep their bias terms.
    self.gc1 = SAGEConv(nfeat, nhid, aggregator_type='mean', bias=True)
    self.gc2 = SAGEConv(nhid, nclass, aggregator_type='lstm', bias=True)
def __init__(self, in_feats, hid_feats, out_feats):
    """Mean-aggregating GraphSAGE: in_feats -> hid_feats -> out_feats."""
    super().__init__()
    widths = (in_feats, hid_feats, out_feats)
    self.conv1 = SAGEConv(widths[0], widths[1], 'mean')
    self.conv2 = SAGEConv(widths[1], widths[2], 'mean')
def __init__(self, in_feats=1433, n_hidden=16, n_classes=7):
    """Two 'gcn'-aggregator SAGE layers: in_feats -> n_hidden -> n_classes.

    Generalized: the original hard-coded 1433/16/7 (presumably the Cora
    citation dataset's feature/class counts — confirm against the caller).
    The defaults reproduce the original exactly, so ``Net()`` is unchanged.
    """
    super(Net, self).__init__()
    self.layer1 = SAGEConv(in_feats, n_hidden, 'gcn')
    self.layer2 = SAGEConv(n_hidden, n_classes, 'gcn')