Example #1
    def __init__(self, num_feats=None, dims=None, num_classes=2, message_passing='GCN', GCN_improved=False,
                 GAT_heads=None, skip=True):
        super(PiNet, self).__init__()
        if dims is None:
            dims = [64, 64]
        self.skip = skip
        if message_passing == 'GCN':
            from torch_geometric.nn import GCNConv

            self.mp_a1 = GCNConv(num_feats, dims[0], improved=GCN_improved)
            self.mp_x1 = GCNConv(num_feats, dims[0], improved=GCN_improved)

            skip_dims = dims[0] + num_feats if skip else dims[0]
            self.linear_a = torch.nn.Linear(skip_dims, dims[0])
            self.linear_x = torch.nn.Linear(skip_dims, dims[0])

            self.mp_a2 = GCNConv(dims[0], dims[1], improved=GCN_improved)
            self.mp_x2 = GCNConv(dims[0], dims[1], improved=GCN_improved)

            skip_dims2 = dims[1] + dims[0] if skip else dims[1]
            self.linear2 = torch.nn.Linear(skip_dims2 ** 2, num_classes)

        elif message_passing == 'GAT':
            from torch_geometric.nn import GATConv

            if GAT_heads is None:
                GAT_heads = [5, 2]
            self.mp_a1 = GATConv(num_feats, dims[0], heads=GAT_heads[0])
            self.mp_x1 = GATConv(num_feats, dims[0], heads=GAT_heads[0])

            skip_dims = (dims[0] * GAT_heads[0]) + num_feats if skip else dims[0] * GAT_heads[0]

            self.linear_a = torch.nn.Linear(skip_dims, dims[0])
            self.linear_x = torch.nn.Linear(skip_dims, dims[0])

            self.mp_a2 = GATConv(dims[0] if skip else dims[0] * GAT_heads[0], dims[1], heads=GAT_heads[1])
            self.mp_x2 = GATConv(dims[0] if skip else dims[0] * GAT_heads[0], dims[1], heads=GAT_heads[1])

            skip_dims2 = (dims[1] * GAT_heads[1]) + dims[0] if skip else dims[1] * GAT_heads[1]

            self.linear2 = torch.nn.Linear(skip_dims2 ** 2, num_classes)

        elif message_passing == 'SAGE':
            from torch_geometric.nn import SAGEConv

            self.mp_a1 = SAGEConv(num_feats, dims[0])
            self.mp_x1 = SAGEConv(num_feats, dims[0])

            skip_dims = dims[0] + num_feats if skip else dims[0]
            self.linear_a = torch.nn.Linear(skip_dims, dims[0])
            self.linear_x = torch.nn.Linear(skip_dims, dims[0])

            self.mp_a2 = SAGEConv(dims[0], dims[1])
            self.mp_x2 = SAGEConv(dims[0], dims[1])

            skip_dims2 = dims[1] + dims[0] if skip else dims[1]
            self.linear2 = torch.nn.Linear(skip_dims2 ** 2, num_classes)
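
The skip_dims2 ** 2 input to linear2 implies that the forward pass flattens a pairwise product of the two message-passing streams. A minimal usage sketch (hypothetical values), showing how the classifier size falls out of the defaults:

# Hypothetical usage: with the defaults dims=[64, 64] and skip=True, each
# stream's second-layer output has 64 + 64 = 128 channels, so linear2
# expects a flattened 128 * 128 = 16384-dimensional pairwise product.
model = PiNet(num_feats=32, num_classes=2, message_passing='GCN')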
Example #2
 def __init__(self, dataset, num_layers, hidden):
     super(GlobalAttentionNet, self).__init__()
     self.conv1 = SAGEConv(dataset.num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden, hidden))
     self.att = GlobalAttention(Linear(hidden, 1))
     self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
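
A plausible forward pass for this constructor; this is a sketch assuming the standard PyTorch Geometric pattern (F is torch.nn.functional), not the repository's exact code:

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    for conv in self.convs:
        x = F.relu(conv(x, edge_index))
    x = self.att(x, batch)  # global attention readout: one vector per graph
    x = F.relu(self.lin1(x))
    x = self.lin2(x)
    return F.log_softmax(x, dim=-1)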
Example #3
 def __init__(self, dataset, num_layers, hidden):
     super().__init__()
     self.conv1 = SAGEConv(dataset.num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden, hidden))
     self.set2set = Set2Set(hidden, processing_steps=4)
     self.lin1 = Linear(2 * hidden, hidden)  # Set2Set returns 2 * hidden features per graph
     self.lin2 = Linear(hidden, dataset.num_classes)
Example #4
 def __init__(self, in_channels, opts):
     super(FAE_SAGEConv, self).__init__()
     self.opts = opts
     if self.opts.problem == 'Prediction':
         self.conv1 = SAGEConv(in_channels, 64)
         self.conv2 = SAGEConv(64, 32)
         self.lin = Lin(32, 1)
     else:
         self.conv1 = SAGEConv(in_channels, 64)
         self.lin = Lin(64, in_channels)
Example #5
 def __init__(self, input_channels, hidden_channels, out_channels, num_layers=3, dropout_ratio=0., aggr='mean'):
     super().__init__()
     torch.manual_seed(1234567)
     self.num_layers = num_layers
     self.dropout_ratio = dropout_ratio
     self.convs = nn.ModuleList()
     self.convs.append(SAGEConv(input_channels, hidden_channels, aggr=aggr))
     for i in range(num_layers - 2):
         self.convs.append(SAGEConv(hidden_channels, hidden_channels, aggr=aggr))
     self.convs.append(SAGEConv(hidden_channels, out_channels, aggr=aggr))
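
Given num_layers and dropout_ratio, the constructor suggests the usual stacked-SAGE forward: ReLU and dropout between layers, none after the last. A sketch under that assumption (F is torch.nn.functional):

def forward(self, x, edge_index):
    for conv in self.convs[:-1]:
        x = conv(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
    return self.convs[-1](x, edge_index)  # no activation on the output layer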
Example #6
 def __init__(self, dataset, num_layers, hidden):
     super(GraphSAGE, self).__init__()
     self.conv1 = SAGEConv(dataset.num_features, hidden)
     #self.conv1 = SAGELafConv(dataset.num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 2):
         #self.convs.append(SAGEConv(hidden, hidden))
         self.convs.append(SAGELafConv(hidden, hidden))
     #self.convn = SAGELafConv(hidden, dataset.num_classes)
     self.convn = SAGEConv(hidden, dataset.num_classes)
Example #7
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers):
        super(SAGE, self).__init__()

        self.num_layers = num_layers

        self.convs = torch.nn.ModuleList()
        self.convs.append(SAGEConv(in_channels, hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, out_channels))
Example #8
    def __init__(self, input_dim, out_dim, filter_num, dropout=False, layer=2):
        super(SAGEModel, self).__init__()
        self.dropout = dropout
        self.conv1 = SAGEConv(input_dim, filter_num)
        self.conv2 = SAGEConv(filter_num, filter_num)
        self.Conv = nn.Conv1d(filter_num, out_dim, kernel_size=1)

        self.layer = layer
        if layer == 3:
            self.conv3 = SAGEConv(filter_num, filter_num)
Example #9
 def __init__(self, in_channels, hidden_channels, output_dim, num_layers):
     super(SortPool, self).__init__()
     self.k = 30
     self.conv1 = SAGEConv(in_channels, hidden_channels)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden_channels, hidden_channels))
     self.conv1d = Conv1d(hidden_channels, 32, 5)
     self.lin1 = Linear(32 * (self.k - 5 + 1), hidden_channels)
     self.lin2 = Linear(hidden_channels, output_dim)
Example #10
 def __init__(self, dataset, num_layers, hidden):
     super().__init__()
     self.k = 30
     self.conv1 = SAGEConv(dataset.num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden, hidden))
     self.conv1d = Conv1d(hidden, 32, 5)
     self.lin1 = Linear(32 * (self.k - 5 + 1), hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
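
In both SortPool constructors above, Conv1d(hidden, 32, 5) slides over the k = 30 sorted node embeddings and yields 30 - 5 + 1 = 26 positions, which is exactly the 32 * (self.k - 5 + 1) input of lin1. A forward sketch assuming PyG's global_sort_pool readout (F is torch.nn.functional):

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    for conv in self.convs:
        x = F.relu(conv(x, edge_index))
    x = global_sort_pool(x, batch, self.k)              # [num_graphs, k * hidden]
    x = x.view(x.size(0), self.k, -1).permute(0, 2, 1)  # [num_graphs, hidden, k]
    x = F.relu(self.conv1d(x))                          # [num_graphs, 32, k - 5 + 1]
    x = x.view(x.size(0), -1)
    x = F.relu(self.lin1(x))
    return F.log_softmax(self.lin2(x), dim=-1)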
Example #11
 def __init__(self, in_channels, hidden_channels, out_channels, num_layers):
     super(Net, self).__init__()
     self.convs = torch.nn.ModuleList()
     self.batch_norms = torch.nn.ModuleList()
     self.convs.append(SAGEConv(in_channels, hidden_channels))
     self.batch_norms.append(BatchNorm(hidden_channels))
     for _ in range(num_layers - 2):
         self.convs.append(SAGEConv(hidden_channels, hidden_channels))
         self.batch_norms.append(BatchNorm(hidden_channels))
     self.convs.append(SAGEConv(hidden_channels, out_channels))
Example #12
    def __init__(self, dim_features, dim_embedding):
        super(Encoder, self).__init__()
        #self.conv = SAGEConv(dim_features, dim_embedding)
        #self.prelu = nn.PReLU(dim_embedding)

        self.prelu = nn.PReLU()  # like in Repository
        self.gcn1 = SAGEConv(dim_features, dim_embedding, normalize=True)
        self.gcn2 = SAGEConv(dim_embedding, dim_embedding, normalize=True)
        self.gcn3 = SAGEConv(dim_embedding, dim_embedding, normalize=True)
        self.Wskip = Linear(dim_features, dim_embedding)
Example #13
 def __init__(self, data, num_classes, args):
     super(GraphSAGE, self).__init__()
     self.args = args
     self.data = data
     self.conv1 = SAGEConv(self.data.num_features,
                           self.args.sage_hidden,
                           normalize=False)
     self.conv2 = SAGEConv(self.args.sage_hidden,
                           num_classes,
                           normalize=False)
Example #14
    def __init__(self, input_dim, hidden_dim):
        super(MetaNet, self).__init__()

        self.conv1 = SAGEConv(input_dim, hidden_dim)
        self.drop1 = nn.Dropout(p=0.1)

        self.conv2 = SAGEConv(hidden_dim, hidden_dim)
        self.drop2 = nn.Dropout(p=0.1)

        self.lin = nn.Linear(hidden_dim * 2, 1)
Example #15
 def __init__(self, n_features):
     super(SageNet, self).__init__()
     self.conv1 = SAGEConv(n_features, 16, normalize=False)
     self.conv2 = SAGEConv(16, 16, normalize=False)
     self.conv3 = SAGEConv(16, 16, normalize=False)
     self.conv4 = SAGEConv(16, 16, normalize=False)
     self.conv5 = SAGEConv(16, 16, normalize=False)
     self.conv6 = SAGEConv(16, 16, normalize=False)
     self.conv7 = SAGEConv(16, 16, normalize=False)
     self.conv8 = SAGEConv(16, 16, normalize=False)
     self.conv9 = SAGEConv(16, 16, normalize=False)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 16)
     self.out = Linear(16, 1)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.s9 = SELU()
     self.s10 = SELU()
     self.s11 = SELU()
Example #16
    def __init__(self, in_channels, cls_num):
        super(Net, self).__init__()
        # self.conv1 = GCNConv(in_channels, 64)
        # self.conv2 = GCNConv(64, 32)
        # self.conv3 = GCNConv(32, cls_num+1)

        self.conv1 = SAGEConv(in_channels, 64)
        self.bn1 = nn.BatchNorm1d(64)
        self.conv2 = SAGEConv(64, 32)
        self.bn2 = nn.BatchNorm1d(32)
        self.conv3 = SAGEConv(32, cls_num)
Example #17
 def __init__(self, dataset, device, loss_function, mode='unsupervised', conv='GCN',
              hidden_layer=64, out_layer=128, dropout=0, num_layers=2):
     super(Net, self).__init__()
     self.mode = mode
     self.conv = conv
     self.num_layers = num_layers
     self.data = dataset
     self.num_features = dataset.x.shape[1]
     self.loss_function = loss_function
     self.convs = torch.nn.ModuleList()
     self.hidden_layer = hidden_layer
     self.out_layer = out_layer
     self.dropout = dropout
     self.device = device

     if self.mode == 'unsupervised':
         out_channels = self.out_layer
     elif self.mode == 'supervised':
         # one output per class for node classification; for link
         # prediction, use out_layer (e.g. 128) instead
         out_channels = len(collections.Counter(self.data.y.tolist()).keys())
     if self.conv == 'GCN':
         if self.num_layers == 1:
             self.convs.append(GCNConv(self.num_features, out_channels))
         else:
             self.convs.append(GCNConv(self.num_features, self.hidden_layer))
             for i in range(1, self.num_layers - 1):
                 self.convs.append(GCNConv(self.hidden_layer, self.hidden_layer))
             self.convs.append(GCNConv(self.hidden_layer, out_channels))
     elif self.conv == 'SAGE':
         if self.num_layers == 1:
             self.convs.append(SAGEConv(self.num_features, out_channels))
         else:
             self.convs.append(SAGEConv(self.num_features, self.hidden_layer))
             for i in range(1, self.num_layers - 1):
                 self.convs.append(SAGEConv(self.hidden_layer, self.hidden_layer))
             self.convs.append(SAGEConv(self.hidden_layer, out_channels))
     elif self.conv == 'GAT':
         if self.num_layers == 1:
             self.convs.append(GATConv(self.num_features, out_channels))
         else:
             self.convs.append(GATConv(self.num_features, self.hidden_layer))
             for i in range(1, self.num_layers - 1):
                 self.convs.append(GATConv(self.hidden_layer, self.hidden_layer))
             self.convs.append(GATConv(self.hidden_layer, out_channels))

     if loss_function["loss var"] == "Random Walks":
         self.loss = self.lossRandomWalks
     elif loss_function["loss var"] == "Context Matrix":
         self.loss = self.lossContextMatrix
     elif loss_function["loss var"] == "Factorization":
         self.loss = self.lossFactorization
     elif loss_function["loss var"] == "Laplacian EigenMaps":
         self.loss = self.lossLaplacianEigenMaps
     self.reset_parameters()
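
The constructor dispatches on loss_function["loss var"], so callers pass a dict. A hypothetical instantiation sketch (all argument values invented):

# The "loss var" key selects one of the four loss methods defined on the class.
model = Net(dataset, device='cpu',
            loss_function={"loss var": "Random Walks"},
            mode='unsupervised', conv='SAGE', num_layers=2)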
Example #18
    def __init__(self, num_node_features, num_of_classes=2):
        super(Net_1, self).__init__()
        self.conv1 = SAGEConv(num_node_features, 128)
        self.pool1 = TopKPooling(128, ratio=0.5)
        self.conv2 = SAGEConv(128, 128)
        self.pool2 = TopKPooling(128, ratio=0.5)
        self.conv3 = SAGEConv(128, 128)
        self.pool3 = TopKPooling(128, ratio=0.5)

        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, num_of_classes)
Example #19
 def __init__(self, dataset, num_layers, hidden, mode='cat'):
     super().__init__()
     self.conv1 = SAGEConv(dataset.num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden, hidden))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = Linear(num_layers * hidden, hidden)
     else:
         self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
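
With mode='cat', JumpingKnowledge concatenates all num_layers intermediate representations, which is why lin1 takes num_layers * hidden inputs. A forward sketch assuming the usual PyG graph-classification pattern (global_mean_pool readout; F is torch.nn.functional):

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    xs = [x]
    for conv in self.convs:
        x = F.relu(conv(x, edge_index))
        xs += [x]
    x = self.jump(xs)              # 'cat' mode: [N, num_layers * hidden]
    x = global_mean_pool(x, batch)
    x = F.relu(self.lin1(x))
    return F.log_softmax(self.lin2(x), dim=-1)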
Example #20
    def __init__(self):
        super(Conv, self).__init__()
        self.conv1 = nn.Conv2d(5, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv4 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv5 = nn.Conv2d(256, 256, 3, padding=1)

        #self.sagpool = SAGPooling(256, ratio=0.8,min_score=None)
        self.sage1 = SAGEConv(256, 128, bias=True, normalize=True)
        self.sage2 = SAGEConv(256, 128, bias=True, normalize=True)
        self.sage3 = SAGEConv(128, 2, bias=True, normalize=True)
Example #21
    def __init__(self, feature, out_channel):
        super(GraphSage, self).__init__()

        self.GConv1 = SAGEConv(feature, 1024)
        self.bn1 = BatchNorm(1024)

        self.GConv2 = SAGEConv(1024, 1024)
        self.bn2 = BatchNorm(1024)

        self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(inplace=True))
        self.dropout = nn.Dropout(0.2)
        self.fc1 = nn.Sequential(nn.Linear(512, out_channel))
Example #22
 def __init__(self, num_input_features, num_layers, hidden, mode='cat'):
     super(GraphSAGEWithJK, self).__init__()
     self.conv1 = SAGEConv(num_input_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden, hidden))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = Linear(3 * num_layers * hidden, hidden)
     else:
         self.lin1 = Linear(3 * hidden, hidden)
     self.lin2 = Linear(hidden, 2)
Example #23
 def __init__(self, in_dim, out_dim, hidden_dim, layers, dropout=0):
     super().__init__()
     assert layers >= 2
     self.dropout = dropout
     self.conv_layers = nn.ModuleList(
         [SAGEConv(in_dim, hidden_dim, normalize=True)])
     for _ in range(layers - 2):
         self.conv_layers.append(
             SAGEConv(hidden_dim, hidden_dim, normalize=True))
     self.conv_layers.append(SAGEConv(hidden_dim, out_dim, normalize=True))
     self.bn_layers = nn.ModuleList(
         [BatchNorm(hidden_dim) for _ in range(layers-1)])
Example #24
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        super(SAGE, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(SAGEConv(in_channels, hidden_channels, concat=True))
        for _ in range(num_layers - 2):
            self.convs.append(
                SAGEConv(hidden_channels, hidden_channels, concat=True))
        self.convs.append(SAGEConv(hidden_channels, out_channels, concat=True))

        self.dropout = dropout
Example #25
 def __init__(self, in_feats, hidden_dim, num_classes, num_layers, out_channel, kernel_size, k=30, dropout=0.5):
     super(SortPool, self).__init__()
     self.k = k
     self.dropout = dropout
     self.num_layers = num_layers
     self.gnn_convs = nn.ModuleList()
     self.gnn_convs.append(SAGEConv(in_feats, hidden_dim))
     for _ in range(self.num_layers - 1):
         self.gnn_convs.append(SAGEConv(hidden_dim, hidden_dim))
     self.conv1d = nn.Conv1d(hidden_dim, out_channel, kernel_size)
     self.fc1 = nn.Linear(out_channel * (self.k - kernel_size + 1), hidden_dim)
     self.fc2 = nn.Linear(hidden_dim, num_classes)
Example #26
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        super(GCN, self).__init__()
        self.inProj = torch.nn.Linear(in_channels, hidden_channels)
        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, out_channels))

        self.linear = torch.nn.Linear(hidden_channels, out_channels)
        self.weights = torch.nn.Parameter(torch.randn((len(self.convs))))
        self.dropout = dropout
Example #27
 def __init__(self,
              num_features,
              output_channels,
              num_layers=3,
              hidden=128,
              **kwargs):
     super(GraphSAGE, self).__init__()
     self.conv1 = SAGEConv(num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden, hidden))
     self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, output_channels)
Example #28
 def __init__(self, num_node_features, hidden_channels, embed_dim=64):
     super(Net, self).__init__()
     self.device = torch.device(
         "cuda:0" if torch.cuda.is_available() else "cpu")
     in_channels = num_node_features
     out_channels = embed_dim
     self.decoder = InnerProductDecoder().to(self.device)
     self.conv1 = SAGEConv(in_channels, hidden_channels)
     self.conv2 = SAGEConv(hidden_channels, hidden_channels)
     self.conv3 = SAGEConv(hidden_channels, hidden_channels)
     #self.lin = torch.nn.Linear(3 * hidden_channels, out_channels)
     self.lin = torch.nn.Linear(hidden_channels, out_channels)
     self.linear_decoder1 = torch.nn.Linear(1, 1)
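
Together with InnerProductDecoder, this reads as a graph autoencoder: the SAGE stack encodes nodes into embed_dim-dimensional vectors and the decoder scores candidate edges by dot product. An encode/decode sketch (the method names and the use of self.lin as the projection head are assumptions; F is torch.nn.functional):

def encode(self, x, edge_index):
    x = F.relu(self.conv1(x, edge_index))
    x = F.relu(self.conv2(x, edge_index))
    x = F.relu(self.conv3(x, edge_index))
    return self.lin(x)  # [N, embed_dim] node embeddings

def decode(self, z, edge_index):
    # InnerProductDecoder returns sigmoid(z[src] . z[dst]) for each edge
    return self.decoder(z, edge_index, sigmoid=True)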
Example #29
 def __init__(self,
              features_num=16,
              num_class=2,
              num_layers=2,
              hidden=128,
              dropout=0.3):
     super(GraphSAGE, self).__init__()
     self.conv1 = SAGEConv(features_num, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 2):
         self.convs.append(SAGEConv(hidden, hidden))
     self.conv_last = SAGEConv(hidden, num_class)
     self.dropout = dropout
Example #30
    def __init__(self):
        super(Net, self).__init__()

        self.conv1 = SAGEConv(dataset.num_features, 128)
        self.pool1 = TopKPooling(128, ratio=0.8)
        self.conv2 = SAGEConv(128, 128)
        self.pool2 = TopKPooling(128, ratio=0.8)
        self.conv3 = SAGEConv(128, 128)
        self.pool3 = TopKPooling(128, ratio=0.8)

        self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, dataset.num_classes)
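
This matches the well-known PyTorch Geometric graph-classification example (note that dataset is referenced as a module-level global here). After each pooling step, the graph is summarized by concatenating global max- and mean-pooling (128 + 128 = 256, matching lin1), and the three summaries are summed. A sketch of that forward pass (gmp/gap stand for global_max_pool/global_mean_pool; F is torch.nn.functional):

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)  # [num_graphs, 256]

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = x1 + x2 + x3  # sum the three graph summaries
    x = F.relu(self.lin1(x))
    x = F.relu(self.lin2(x))
    return F.log_softmax(self.lin3(x), dim=-1)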