Example #1
    def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim, num_layers, hidden, ratio=0.8, dropout=0, num_class=0):
        super(ASAP, self).__init__()

        self.num_class = num_class
        self.max_seq_len = max_seq_len
        self.node_encoder = node_encoder

        self.conv1 = GraphConv(emb_dim, hidden, aggr='mean')
        self.convs = torch.nn.ModuleList()
        self.pools = torch.nn.ModuleList()
        self.convs.extend([
            GraphConv(hidden, hidden, aggr='mean')
            for _ in range(num_layers - 1)
        ])
        self.pools.extend([
            ASAPooling(hidden, ratio, dropout=dropout)
            for _ in range(num_layers // 2)
        ])
        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        # self.lin2 = Linear(hidden, dataset.num_classes)

        if self.num_class > 0:  # classification
            self.graph_pred_linear = torch.nn.Linear(hidden, self.num_class)
        else:
            self.graph_pred_linear_list = torch.nn.ModuleList()
            for i in range(max_seq_len):
                self.graph_pred_linear_list.append(torch.nn.Linear(hidden, num_vocab))
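
The per-position heads built above are typically applied to a pooled graph embedding, one head per output position. A minimal sketch (the helper name and `h_graph` are illustrative, not from the source):

    import torch

    def predict_token_logits(model, h_graph):
        # h_graph: [batch_size, hidden]; returns max_seq_len logit tensors,
        # each of shape [batch_size, num_vocab]
        return [lin(h_graph) for lin in model.graph_pred_linear_list]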
Example #2
    def init_model(self, n_class, feature_num):
        num_layers = int(self.hyperparameters['num_layers'])
        hidden_size = int(2**self.hyperparameters['hidden'])
        lr = self.hyperparameters['lr']
        if self.hyperparameters['use_linear']:
            self.input_lin = Linear(feature_num, hidden_size)
            self.convs = torch.nn.ModuleList()
            for i in range(num_layers):
                self.convs.append(GraphConv(hidden_size, hidden_size))
            self.output_lin = Linear(hidden_size, n_class)

        else:
            if num_layers == 1:
                self.conv1 = GraphConv(in_channels=feature_num,
                                       out_channels=n_class)
            else:
                self.conv1 = GraphConv(in_channels=feature_num,
                                       out_channels=hidden_size)
                self.convs = torch.nn.ModuleList()
                for i in range(num_layers - 2):
                    self.convs.append(
                        GraphConv(in_channels=hidden_size,
                                  out_channels=hidden_size))
                self.conv2 = GraphConv(hidden_size, n_class)
        self.optimizer = torch.optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=5e-4)

        self.to('cuda')  # Module.to() moves parameters in-place; rebinding self is unnecessary

        torch.cuda.empty_cache()
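
A hedged sketch (not part of the source) of a forward pass matching the `use_linear` branch of init_model above:

    import torch.nn.functional as F

    def forward(self, x, edge_index):
        # Linear encoder, a stack of GraphConv layers, then a linear decoder
        x = F.relu(self.input_lin(x))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        return F.log_softmax(self.output_lin(x), dim=-1)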
Example #3
 def __init__(self,
              num_features,
              n_classes,
              num_hidden,
              num_hidden_layers,
              dropout,
              activation,
              aggr='add',
              bias=True):
     super(PGraphConv, self).__init__()
     # dropout
     if dropout:
         self.dropout = nn.Dropout(p=dropout)
     else:
         self.dropout = nn.Dropout(p=0.)
     # activation
     self.activation = activation
     # input layer
     self.conv_input = GraphConv(num_features,
                                 num_hidden,
                                 aggr=aggr,
                                 bias=bias)
     # Hidden layers
     self.layers = nn.ModuleList()
     for _ in range(num_hidden_layers):
         self.layers.append(
             GraphConv(num_hidden, num_hidden, aggr=aggr, bias=bias))
     # output layer
     self.conv_output = GraphConv(num_hidden, n_classes, bias=bias)
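
An illustrative forward pass (an assumption; the original is not shown) wiring the input, hidden, and output convolutions defined above:

    def forward(self, x, edge_index):
        # Activation and dropout after the input and each hidden layer
        x = self.dropout(self.activation(self.conv_input(x, edge_index)))
        for layer in self.layers:
            x = self.dropout(self.activation(layer(x, edge_index)))
        return self.conv_output(x, edge_index)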
Example #4
 def __init__(self,
              input_feat_dim,
              node_dim1,
              node_dim2,
              dropout=0.2,
              adj_drop=0.2,
              decoder='concatDec',
              sigmoid=True,
              n_nodes=50):
     super().__init__()
     self.sigmoid = sigmoid
     self.adj_drop = adj_drop
     self.n_nodes = n_nodes
     self.node_dim2 = node_dim2
     self.dropout_layer = torch.nn.Dropout(dropout)
     # encode
     self.conv1 = GraphConv(input_feat_dim, node_dim1)
     self.conv2 = GraphConv(node_dim1, node_dim2)
     self.fc = torch.nn.Linear(n_nodes * node_dim2, node_dim2)
     # decode
     self.fc2 = torch.nn.Linear(node_dim2, n_nodes * node_dim2)
     if decoder == 'bilinearDec':
         self.decoder = bilinearDec(node_dim2)
     elif decoder == 'concatDec':
         self.decoder = concatDec(node_dim2, node_dim1, dropout)
     else:
         raise NotImplementedError
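
A sketch (assumed, not from the source) of the encode step implied by the layers above, flattening all n_nodes embeddings into a single graph-level code via self.fc:

    import torch

    def encode(self, x, edge_index):
        h = torch.relu(self.conv1(x, edge_index))
        h = self.dropout_layer(torch.relu(self.conv2(h, edge_index)))
        # Flatten [n_nodes, node_dim2] into one vector per graph
        return self.fc(h.view(-1, self.n_nodes * self.node_dim2))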
Example #5
    def __init__(self,
                 in_channels,
                 ratio=0.5,
                 gnn='GraphConv',
                 min_score=None,
                 multiplier=1,
                 **kwargs):
        super(SAGPooling, self).__init__()

        self.in_channels = in_channels
        self.ratio = ratio
        self.min_score = min_score
        self.multiplier = multiplier
        self.gnn_name = gnn

        assert gnn in ['GraphConv', 'GCN', 'GAT', 'SAGE']
        if gnn == 'GCN':
            self.gnn = GCNConv(self.in_channels, 1, **kwargs)
        elif gnn == 'GAT':
            self.gnn = GATConv(self.in_channels, 1, **kwargs)
        elif gnn == 'SAGE':
            self.gnn = SAGEConv(self.in_channels, 1, **kwargs)
        else:
            self.gnn = GraphConv(self.in_channels, 1, **kwargs)

        self.reset_parameters()
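
Hypothetical instantiation of the pooling layer above; passing gnn='GAT' swaps in a GATConv scorer, and the other listed names work the same way:

    pool = SAGPooling(in_channels=64, ratio=0.5, gnn='GAT')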
Example #6
    def __init__(self, nfeat, nhid, nclass, dropout, nlayer=2):
        super(StandGraph2, self).__init__()
        self.conv1 = GraphConv(nfeat, nhid)
        self.conv2 = GraphConv(nhid, nclass)

        self.dropout_p = dropout
        self.sig = nn.Sigmoid()
Example #7
class GGCN(torch.nn.Module):
    def __init__(self,
                 num_layers=2,
                 hidden=200,
                 features_num=16,
                 num_class=2,
                 dropout=0.5):
        super(GGCN, self).__init__()
        self.conv1 = GraphConv(features_num, hidden, aggr='add')
        self.lin2 = Linear(hidden, num_class)
        self.dropout = dropout
        print("hidden=%d, dropout=%f" % (hidden, self.dropout))

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
        x = F.relu(self.conv1(x, edge_index, edge_weight=edge_weight))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #8
    def __init__(self, in_dim, out_dim):
        super(RCNet, self).__init__()

        inter_dim = 64

        self.conv1 = GraphConv(in_dim, inter_dim, 'mean')
        self.conv2 = GraphConv(inter_dim, out_dim)
Example #9
    def __init__(self, in_channels, number_hidden_layers, aggr, hidden_out_channel, out_channel, pool_layer, k=1):
        super(GCN_Net, self).__init__()
        self.in_channels = in_channels
        self.number_hidden_layers = number_hidden_layers #number of hidden GraphConv layers
        self.aggr = aggr # "add", "mean" or "max"
        self.pool_layer = pool_layer # 'add', 'max', 'mean' or 'sort'
        self.hidden_out_channel = hidden_out_channel
        self.out_channel = out_channel
        self.atom_encoder = AtomEncoder(emb_dim=self.in_channels)
        self.k = k

        
        self.graph_conv_list = nn.ModuleList()
        self.graph_conv_list.append(GraphConv(in_channels=self.in_channels, out_channels=self.hidden_out_channel, aggr=self.aggr))

        self.batchnorm = BatchNorm(in_channels=self.hidden_out_channel)

        if self.number_hidden_layers != 0:
            for i in range(self.number_hidden_layers):
                self.graph_conv_list.append(GraphConv(in_channels=self.hidden_out_channel, out_channels=self.hidden_out_channel, aggr=self.aggr))

        self.graph_conv_list.append(GraphConv(in_channels=self.hidden_out_channel, out_channels=self.out_channel, aggr=self.aggr))

        self.linear1 = nn.Linear(self.k * self.out_channel, 16)
        self.linear2 = nn.Linear(16, 1)
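
One plausible readout (an assumption; the original forward is not shown): global sort pooling keeps the k highest-ranked node embeddings per graph, which matches the k * out_channel input size of self.linear1:

    import torch.nn.functional as F
    from torch_geometric.nn import global_sort_pool

    def forward(self, data):
        x = self.atom_encoder(data.x)
        for conv in self.graph_conv_list:
            x = F.relu(conv(x, data.edge_index))
        x = global_sort_pool(x, data.batch, k=self.k)  # [num_graphs, k * out_channel]
        x = F.relu(self.linear1(x))
        return self.linear2(x)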
Example #10
    def __init__(self, input_dim, hidden_channels, target, num_layers=2):
        super(GNN, self).__init__()

        self.gconv1 = GraphConv(in_channels=672, out_channels=400, aggr='add')
        self.gconv2 = GraphConv(in_channels=400, out_channels=300, aggr='add')
        # self.gconv3 = GraphConv(in_channels=250, out_channels=64, aggr='add')
        # self.gconv3 = GraphConv(in_channels=(12,672), out_channels=64, aggr='add')

        self.conv1 = nn.Conv1d(12, 8, 5, 2)
        self.conv2 = nn.Conv1d(8, 4, 5, 2)
        self.conv3 = nn.Conv1d(4, 1, 5, 1)
        # self.conv4 = nn.Conv1d(1, 1, 6)

        self.mlp = Sequential(Linear(68, 32), ReLU(), Linear(32, 16), ReLU(),
                              Linear(16, 1))

        # MODEL CLASS ATTRIBUTES
        self.target = {
            'valence': 0,
            'arousal': 1,
            'dominance': 2,
            'liking': 3
        }[target]
        self.best_val_mse = float('inf')
        self.best_epoch = 0
        self.train_losses = []
        self.eval_losses = []
        self.eval_patience_count = 0
        self.eval_patience_reached = False
Example #11
    def __init__(self, args):
        super(Topkpool, self).__init__()
        self.args = args

        missing_keys = list(set(["features_num", "num_class", "num_graph_features",
                    "ratio", "dropout", "act"]) - set(self.args.keys()))
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ','.join(missing_keys))

        self.num_features = self.args["features_num"]
        self.num_classes = self.args["num_class"]
        self.ratio = self.args["ratio"]
        self.dropout = self.args["dropout"]
        self.num_graph_features = self.args["num_graph_features"]

        self.conv1 = GraphConv(self.num_features, 128)
        self.pool1 = TopKPooling(128, ratio=self.ratio)
        self.conv2 = GraphConv(128, 128)
        self.pool2 = TopKPooling(128, ratio=self.ratio)
        self.conv3 = GraphConv(128, 128)
        self.pool3 = TopKPooling(128, ratio=self.ratio)

        self.lin1 = torch.nn.Linear(256 + self.num_graph_features, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, self.num_classes)
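
A hedged sketch of the first block of the usual TopK readout implied by the 256-dimensional input of self.lin1 (128 from a global max pool concatenated with 128 from a global mean pool); the handling of num_graph_features is omitted:

    import torch
    import torch.nn.functional as F
    from torch_geometric.nn import global_max_pool as gmp, global_mean_pool as gap

    def forward(self, x, edge_index, batch):
        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)  # [num_graphs, 256]
        # ... the conv2/pool2 and conv3/pool3 blocks are typically summed in here ...
        return x1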
Example #12
 def __init__(self, hidden_channels):
     super(GNN, self).__init__()
     torch.manual_seed(12345)
     self.conv1 = GraphConv(dataset.num_node_features, hidden_channels)
     self.conv2 = GraphConv(hidden_channels, hidden_channels)
     self.conv3 = GraphConv(hidden_channels, hidden_channels)
     self.lin = Linear(hidden_channels, dataset.num_classes)
Example #13
    def __init__(self,
                 num_features,
                 output_channels,
                 nb_neurons=128,
                 **kwargs):
        """

        Parameters
        ----------
        num_features: int
            number of node features
        output_channels: int
            number of classes
        """
        super(GraphConv3TPK, self).__init__()

        self.conv1 = GraphConv(num_features, nb_neurons)
        self.pool1 = TopKPooling(nb_neurons, ratio=0.8)
        self.conv2 = GraphConv(nb_neurons, nb_neurons)
        self.pool2 = TopKPooling(nb_neurons, ratio=0.8)
        self.conv3 = GraphConv(nb_neurons, nb_neurons)
        self.pool3 = TopKPooling(nb_neurons, ratio=0.8)

        self.lin1 = torch.nn.Linear(nb_neurons, 64)
        self.lin2 = torch.nn.Linear(64, output_channels)
Example #14
    def __init__(self,
                 dim_features,
                 dim_target,
                 config,
                 activation=lambda x: x,
                 withbn=True,
                 withloop=True):

        super(GCN, self).__init__()

        self.dropout = config['dropout']

        self.ingc = GraphConv(dim_features, config['embedding_dim'])
        self.inbn = torch.nn.BatchNorm1d(config['embedding_dim'])
        self.midlayer = nn.ModuleList()
        self.bns = nn.ModuleList()

        for i in range(config['num_layers']):
            gcb = GraphConv(config['embedding_dim'], config['embedding_dim'])
            self.midlayer.append(gcb)
            bn2 = torch.nn.BatchNorm1d(config['embedding_dim'])
            self.bns.append(bn2)

        self.classifier = SimpleClassifier(config['embedding_dim'], 64,
                                           dim_target, 0.5)
Example #15
 def __init__(self, num_feature, num_class):
     super(Net, self).__init__()
     self.conv1 = GraphConv(num_feature, 128)
     self.conv2 = GraphConv(128, 64)
     
     self.lin1 = torch.nn.Linear(64, 128)
     self.lin2 = torch.nn.Linear(128, 64)
     self.lin3 = torch.nn.Linear(64, num_class)
Example #16
    def __init__(self, num_features, nhid):
        super(FeatureExtractor, self).__init__()

        self.num_features = num_features
        self.nhid = nhid

        self.conv1 = GraphConv(self.num_features, self.nhid * 2)
        self.conv2 = GraphConv(self.nhid * 2, self.nhid * 2)
Example #17
 def __init__(self, num_layers=2, hidden=32, features_num=32, num_class=2):
     super(GraphGNN, self).__init__()
     self.first_lin = Linear(features_num, hidden)
     self.conv1 = GraphConv(hidden, hidden, aggr='add')
     self.conv2 = GraphConv(hidden, hidden, aggr='add')
     self.fuse_weight = torch.nn.Parameter(torch.FloatTensor(num_layers), requires_grad=True)
     self.fuse_weight.data.fill_(1.0 / (num_layers + 1))
     self.out = Linear(hidden, num_class)
Example #18
 def __init__(self, num_features, num_classes=10):
     super(GNNNet, self).__init__()
     self.conv1 = GraphConv(num_features, 32)
     self.conv2 = GraphConv(32, 64)
     self.conv3 = GraphConv(64, 64)
     self.fc1 = torch.nn.Linear(64, 64)
     self.fc2 = torch.nn.Linear(64, 32)
     self.fc3 = torch.nn.Linear(32, num_classes)
Example #19
 def __init__(self, dataset, num_layers, hidden):
     super(Graclus, self).__init__()
     self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(GraphConv(hidden, hidden, aggr='mean'))
     self.lin1 = Linear(num_layers * hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
Example #20
 def __init__(self, hidden_channels):
     super().__init__()
     in_channels = dataset.num_node_features
     out_channels = dataset.num_classes
     self.conv1 = GraphConv(in_channels, hidden_channels)
     self.conv2 = GraphConv(hidden_channels, hidden_channels)
     self.conv3 = GraphConv(hidden_channels, hidden_channels)
     self.lin = torch.nn.Linear(3 * hidden_channels, out_channels)
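
The 3 * hidden_channels input of self.lin suggests a jumping-knowledge-style readout over the three convolutions; a hedged sketch (assumed, not the source):

    import torch
    import torch.nn.functional as F
    from torch_geometric.nn import global_mean_pool

    def forward(self, x, edge_index, batch):
        x1 = F.relu(self.conv1(x, edge_index))
        x2 = F.relu(self.conv2(x1, edge_index))
        x3 = F.relu(self.conv3(x2, edge_index))
        # Concatenate the pooled output of every layer
        x = torch.cat([global_mean_pool(x1, batch),
                       global_mean_pool(x2, batch),
                       global_mean_pool(x3, batch)], dim=1)
        return self.lin(x)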
Example #21
    def __init__(self, num_classes):
        super(GCNConv, self).__init__()

        self.conv1 = GraphConv(6, 64, aggr='add')
        self.conv2 = GraphConv(64 + 6, 128, aggr='add')
        self.conv3 = GraphConv(192 + 12, 256, aggr='add')
        # CAREFUL: If modifying here, check line 202 in experiments.py for pretrained model
        self.lin1 = torch.nn.Linear(256, num_classes)
Example #22
class ASAP(torch.nn.Module):
    def __init__(self,
                 num_classes,
                 num_features,
                 num_layers,
                 hidden,
                 ratio=0.8,
                 dropout=0):
        super(ASAP, self).__init__()
        self.conv1 = GraphConv(num_features, hidden, aggr='mean')
        self.convs = torch.nn.ModuleList()
        self.pools = torch.nn.ModuleList()
        self.convs.extend([
            GraphConv(hidden, hidden, aggr='mean')
            for _ in range(num_layers - 1)
        ])
        self.pools.extend([
            ASAPooling(hidden, ratio, dropout=dropout)
            for _ in range(num_layers // 2)
        ])
        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for pool in self.pools:
            pool.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        edge_weight = None
        x = F.relu(self.conv1(x, edge_index))
        xs = [global_mean_pool(x, batch)]
        for i, conv in enumerate(self.convs):
            x = conv(x=x, edge_index=edge_index, edge_weight=edge_weight)
            x = F.relu(x)
            xs += [global_mean_pool(x, batch)]
            if i % 2 == 0 and i < len(self.convs) - 1:
                pool = self.pools[i // 2]
                x, edge_index, edge_weight, batch, _ = pool(
                    x=x,
                    edge_index=edge_index,
                    edge_weight=edge_weight,
                    batch=batch)

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #23
    def __init__(self, num_layers, num_input_features, hidden):
        super(GraphNN, self).__init__()
        self.conv1 = GraphConv(num_input_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(GraphConv(hidden, hidden))

        self.lin1 = torch.nn.Linear(3 * hidden, hidden)
        self.lin2 = torch.nn.Linear(hidden, 2)
Example #24
def test_gcn_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = GraphConv(in_channels, out_channels)
    assert conv.__repr__() == 'GraphConv(16, 32)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
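
GraphConv.forward also accepts an optional per-edge weight tensor; a small companion test in the same style (an addition, not from the source):

    def test_gcn_conv_edge_weight():
        in_channels, out_channels = 16, 32
        edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
        num_nodes = edge_index.max().item() + 1
        x = torch.randn((num_nodes, in_channels))
        conv = GraphConv(in_channels, out_channels)
        # One weight per edge; the output shape is unchanged
        edge_weight = torch.rand(edge_index.size(1))
        assert conv(x, edge_index, edge_weight).size() == (num_nodes, out_channels)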
Example #25
    def __init__(self):
        super(FeatureExtractor, self).__init__()

        self.num_features = 14
        self.nhid = 128

        self.conv1 = GraphConv(self.num_features, self.nhid)
        self.pool1 = TopKPooling(self.nhid, ratio=0.8)
        self.conv2 = GraphConv(self.nhid, self.nhid)
        self.pool2 = TopKPooling(self.nhid, ratio=0.8)
Example #26
    def __init__(self, in_channels, ratio=0.5):
        super(SAGPool, self).__init__()

        self.in_channels = in_channels
        self.ratio = ratio

        self.conv_1 = GraphConv(in_channels, 1)
        self.conv_2 = GraphConv(in_channels, 1)
        # Register under distinct names: a second add_module('attention_layer', ...)
        # call would overwrite the first registration.
        self.add_module('attention_layer_1', self.conv_1)
        self.add_module('attention_layer_2', self.conv_2)
Example #27
 def __init__(self, hidden_channels):
     super(GCN2, self).__init__()
     self.batchn1 = BatchNorm(dataset.num_node_features)
     self.conv1 = GraphConv(dataset.num_node_features, hidden_channels)
     self.batchn2 = BatchNorm(hidden_channels)
     self.conv2 = GraphConv(hidden_channels, hidden_channels)
     self.batchn3 = BatchNorm(hidden_channels)
     self.conv3 = GraphConv(hidden_channels, hidden_channels)
     self.batchn4 = BatchNorm(hidden_channels)
     self.conv4 = GraphConv(hidden_channels, hidden_channels)
     self.lin = Linear(hidden_channels, dataset.num_classes)
Example #28
 def __init__(self, dataset, num_layers, hidden):
     super(TopK, self).__init__()
     self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')
     self.convs = torch.nn.ModuleList()
     self.pools = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(GraphConv(hidden, hidden, aggr='mean'))
         self.pools.append(TopKPooling(hidden, ratio=0.8))
     self.jump = JumpingKnowledge(mode='cat')
     self.lin1 = Linear(num_layers * hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
Example #30
    def __init__(self, in_channels, node_score_method=None, dropout=0):
        super(StarPooling, self).__init__()
        self.in_channels = in_channels
        if node_score_method is None:
            node_score_method = self.compute_node_score_tanh

        self.compute_node_score = node_score_method
        self.dropout = dropout

        self.score_func = GraphConv(in_channels, 1)
        self.reset_parameters()
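
A plausible definition (an assumption, not from the source) of the default tanh scorer named above, built on the GraphConv score function:

    import torch

    def compute_node_score_tanh(self, x, edge_index, edge_weight=None):
        # One scalar score per node, squashed to (-1, 1)
        score = self.score_func(x, edge_index, edge_weight=edge_weight)
        return torch.tanh(score.view(-1))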