Example #1
def test_arma_conv():
    in_channels, out_channels = (16, 32)
    num_stacks, num_layers = 8, 4
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    edge_weight = torch.rand(edge_index.size(1))

    x = torch.randn((num_nodes, in_channels))

    conv = ARMAConv(in_channels,
                    out_channels,
                    num_stacks,
                    num_layers,
                    dropout=0.25)
    assert conv.__repr__() == 'ARMAConv(16, 32, num_stacks=8, num_layers=4)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
    assert conv(x, edge_index, edge_weight).size() == (num_nodes, out_channels)

    conv = ARMAConv(in_channels,
                    out_channels,
                    num_stacks,
                    num_layers,
                    shared_weights=True)
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
    assert conv(x, edge_index, edge_weight).size() == (num_nodes, out_channels)
Example #2
File: arma.py Project: code-harvey/AutoGL
    def init_model(self, n_class, feature_num):
        hidden_size = int(2 ** self.hyperparameters['hidden'])
        num_stacks = int(self.hyperparameters['num_stacks'])
        conv_layers = int(self.hyperparameters['conv_layers'])
        lr = self.hyperparameters['lr']
        dropout = self.hyperparameters['dropout_rate']
        num_layers = int(self.hyperparameters['num_layers'])
        if self.hyperparameters['use_linear']:
            self.input_lin = Linear(feature_num, hidden_size)
            self.convs = torch.nn.ModuleList()
            for i in range(num_layers):
                self.convs.append(ARMAConv(hidden_size, hidden_size, num_stacks=num_stacks, num_layers=conv_layers, dropout=dropout))
            self.output_lin = Linear(hidden_size, n_class)
        else:
            if num_layers == 1:
                self.conv1 = ARMAConv(feature_num, n_class, num_stacks=num_stacks,
                                      num_layers=conv_layers, shared_weights=False, dropout=dropout)
            else:
                self.conv1 = ARMAConv(feature_num, hidden_size, num_stacks=num_stacks,
                                      num_layers=conv_layers, shared_weights=False, dropout=dropout)
                self.convs = torch.nn.ModuleList()
                for i in range(num_layers - 2):
                    self.convs.append(ARMAConv(hidden_size, hidden_size, num_stacks=num_stacks,
                                               num_layers=conv_layers, shared_weights=False, dropout=dropout))
                self.conv2 = ARMAConv(hidden_size, n_class, num_stacks=num_stacks,
                                      num_layers=conv_layers, shared_weights=False, dropout=dropout)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=5e-4)

        self = self.to('cuda')

        torch.cuda.empty_cache()
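
Only the layer construction is shown above; the project's forward pass is omitted. Below is a minimal sketch of a forward that matches this layer layout, following the ReLU / log-softmax pattern used in the other examples on this page (the method signature, the omission of dropout, and the final log-softmax are assumptions, not taken from the original file; `F` is assumed to be `torch.nn.functional`):

    def forward(self, data):
        # Hypothetical forward matching init_model above; not the project's actual code.
        x, edge_index = data.x, data.edge_index
        if self.hyperparameters['use_linear']:
            x = F.relu(self.input_lin(x))
            for conv in self.convs:
                x = F.relu(conv(x, edge_index))
            x = self.output_lin(x)
        else:
            x = self.conv1(x, edge_index)
            if hasattr(self, 'conv2'):  # num_layers > 1
                x = F.relu(x)
                for conv in self.convs:
                    x = F.relu(conv(x, edge_index))
                x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=-1)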
Example #3
class ARMA_Net(torch.nn.Module):
    def __init__(self, features_num, num_class, hidden, num_stacks, num_layers,
                 shared_weights, dropout, skip_dropout):
        super(ARMA_Net, self).__init__()
        self.dropout = dropout
        self.conv1 = ARMAConv(features_num,
                              hidden,
                              num_stacks,
                              num_layers,
                              shared_weights,
                              dropout=skip_dropout)
        self.conv2 = ARMAConv(hidden,
                              num_class,
                              num_stacks,
                              num_layers,
                              shared_weights,
                              dropout=skip_dropout)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #4
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 T, K, dropout):
        super(ARMANet, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(
            ARMAConv(in_channels,
                     hidden_channels,
                     num_stacks=K,
                     num_layers=T,
                     dropout=dropout,
                     shared_weights=False))
        for l in range(num_layers - 2):
            self.convs.append(
                ARMAConv(hidden_channels,
                         hidden_channels,
                         num_stacks=K,
                         num_layers=T,
                         dropout=dropout,
                         shared_weights=False))
        self.convs.append(
            ARMAConv(hidden_channels,
                     out_channels,
                     num_stacks=K,
                     num_layers=T,
                     dropout=dropout,
                     shared_weights=False))
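
Only the constructor of this ARMANet is shown. A plausible forward for the stacked ModuleList applies every ARMAConv except the last with ReLU and dropout; the sketch below is illustrative only (the method signature and the dropout rate are assumptions, and `F` is assumed to be `torch.nn.functional`):

    def forward(self, x, edge_index):
        # Sketch: all but the final ARMAConv with ReLU + dropout, final layer left raw.
        for conv in self.convs[:-1]:
            x = F.relu(conv(x, edge_index))
            x = F.dropout(x, p=0.5, training=self.training)  # rate chosen for illustration
        return self.convs[-1](x, edge_index)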
Example #5
    def __init__(self, d=3):
        super(ARMAPolicyNetwork, self).__init__()

        self.conv1 = ARMAConv(d,
                              16,
                              num_stacks=3,
                              num_layers=2,
                              shared_weights=True,
                              dropout=0.1)

        self.conv2 = ARMAConv(16,
                              16,
                              num_stacks=3,
                              num_layers=2,
                              shared_weights=True,
                              dropout=0.1,
                              act=None)

        self.conv3 = ARMAConv(16,
                              1,
                              num_stacks=3,
                              num_layers=2,
                              shared_weights=True,
                              dropout=0.1,
                              act=None).to(device)

        self.fc = nn.Linear(16, 1).to(device)
Example #6
class Net(torch.nn.Module):
    def __init__(self, dataset):
        super(Net, self).__init__()
        self.conv1 = ARMAConv(dataset.num_features,
                              args.hidden,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)
        self.conv2 = ARMAConv(args.hidden,
                              dataset.num_classes,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=args.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)
Example #7
    def __init__(self, nfeat, nhid, nclass, dropout, nlayer=2):
        super(ARMA2, self).__init__()
        self.conv1 = ARMAConv(nfeat, nhid)
        self.conv2 = ARMAConv(nhid, nclass)

        self.dropout_p = dropout
        self.sig = nn.Sigmoid()
Example #8
    def __init__(self, nfeat, nhid, nclass, dropout, nlayer=3):
        super(ARMAX, self).__init__()
        self.conv1 = ARMAConv(nfeat, nhid)
        self.conv2 = ARMAConv(nhid, nclass)
        self.convx = nn.ModuleList(
            [ARMAConv(nhid, nhid) for _ in range(nlayer - 2)])
        self.dropout_p = dropout
Example #9
def test_lazy_arma_conv():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])

    conv = ARMAConv(-1, 32, num_stacks=8, num_layers=4)
    assert conv.__repr__() == 'ARMAConv(-1, 32, num_stacks=8, num_layers=4)'
    out = conv(x, edge_index)
    assert out.size() == (4, 32)
Example #10
    def __init__(self):
        super(Net, self).__init__()

        self.conv1 = ARMAConv(dataset.num_features, 16, num_stacks=3,
                              num_layers=2, shared_weights=True, dropout=0.25)

        self.conv2 = ARMAConv(16, dataset.num_classes, num_stacks=3,
                              num_layers=2, shared_weights=True, dropout=0.25,
                              act=None)
Example #11
    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()

        self.conv1 = ARMAConv(in_channels, hidden_channels, num_stacks=3,
                              num_layers=2, shared_weights=True, dropout=0.25)

        self.conv2 = ARMAConv(hidden_channels, out_channels, num_stacks=3,
                              num_layers=2, shared_weights=True, dropout=0.25,
                              act=lambda x: x)
Example #12
    def __init__(self,
                 features_num=16,
                 num_class=2,
                 dropout=0.2,
                 num_layers=2,
                 hidden=16):
        super(ARMA, self).__init__()
        self.conv1 = ARMAConv(features_num, hidden)
        self.conv2 = ARMAConv(hidden, num_class)
        self.dropout = dropout
Example #13
    def __init__(self, dataset):
        super(Net, self).__init__()
        self.conv1 = ARMAConv(dataset.num_features,
                              args.hidden,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)
        self.conv2 = ARMAConv(args.hidden,
                              dataset.num_classes,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)
Example #14
    def __init__(self, y_size, num_features=1):
        self.num_features = num_features
        self.y_size = y_size
        super(Net, self).__init__()

        self.conv1 = ARMAConv(self.num_features, 128)
        self.bn1 = torch.nn.BatchNorm1d(128)
        self.pool1 = TopKPooling(128, ratio=0.8)

        self.conv2 = ARMAConv(128, 128)
        self.bn2 = torch.nn.BatchNorm1d(128)
        self.pool2 = TopKPooling(128, ratio=0.8)

        self.conv3 = ARMAConv(128, 128)
        self.bn3 = torch.nn.BatchNorm1d(128)
        self.pool3 = TopKPooling(128, ratio=0.8)

        self.conv4 = ARMAConv(128, 128)
        self.bn4 = torch.nn.BatchNorm1d(128)
        self.pool4 = TopKPooling(128, ratio=0.8)

        self.conv5 = ARMAConv(128, 128)
        self.bn5 = torch.nn.BatchNorm1d(128)
        self.pool5 = TopKPooling(128, ratio=0.8)

        self.conv6 = ARMAConv(128, 128)
        self.bn6 = torch.nn.BatchNorm1d(128)
        self.pool6 = TopKPooling(128, ratio=0.8)

        self.conv7 = ARMAConv(128, 128)
        self.bn7 = torch.nn.BatchNorm1d(128)
        self.pool7 = TopKPooling(128, ratio=0.8)

        self.conv8 = ARMAConv(128, 128)
        self.bn8 = torch.nn.BatchNorm1d(128)
        self.pool8 = TopKPooling(128, ratio=0.8)

        self.conv9 = ARMAConv(128, 128)
        self.bn9 = torch.nn.BatchNorm1d(128)
        self.pool9 = TopKPooling(128, ratio=0.8)

        self.conv10 = ARMAConv(128, 128)
        self.bn10 = torch.nn.BatchNorm1d(128)
        self.pool10 = TopKPooling(128, ratio=0.8)

        self.lin1 = torch.nn.Linear(2560, 1280)
        self.lin2 = torch.nn.Linear(1280, self.y_size)

        self.act = torch.nn.PReLU()
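
Only the constructor is shown. The 2560-dimensional input of lin1 suggests a 256-dimensional readout per block (global max pooling concatenated with global mean pooling of the 128-dimensional node features) collected across the ten conv/pool blocks. The sketch below illustrates that reading and is not the original forward; it assumes `from torch_geometric.nn import global_max_pool as gmp, global_mean_pool as gap` and a PyG `data` object with a `batch` vector:

    def forward(self, data):
        # Sketch: ARMAConv -> BatchNorm -> PReLU -> TopKPooling per block, with a
        # (max || mean) readout of 256 dims per block; ten readouts -> 2560 dims for lin1.
        x, edge_index, batch = data.x, data.edge_index, data.batch
        readouts = []
        for i in range(1, 11):
            x = self.act(getattr(self, f'bn{i}')(getattr(self, f'conv{i}')(x, edge_index)))
            x, edge_index, _, batch, _, _ = getattr(self, f'pool{i}')(x, edge_index, batch=batch)
            readouts.append(torch.cat([gmp(x, batch), gap(x, batch)], dim=1))
        x = self.act(self.lin1(torch.cat(readouts, dim=1)))
        return self.lin2(x)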
Example #15
File: arma.py Project: JunweiSUN/AutoGRL
    def __init__(self):
        super(Net, self).__init__()

        self.conv1 = ARMAConv(data.x.shape[1],
                              16,
                              num_stacks=3,
                              num_layers=2,
                              dropout=0.25)

        self.conv2 = ARMAConv(16,
                              int(max(data.y)) + 1,
                              num_stacks=3,
                              num_layers=2,
                              dropout=0.25,
                              act=lambda x: x)
Example #16
    def __init__(self, in_channels, out_channels, aggr_type, conv_type):
        super(GNNLayer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_type = conv_type

        if self.conv_type.startswith('gat'):
            heads = int(self.conv_type[4:])
            self.conv = GATConv(in_channels, out_channels, heads=heads, concat=False)
        elif self.conv_type == 'gcn':
            self.conv = GCNConv(in_channels, out_channels)
        elif self.conv_type == 'sage':
            self.conv = SAGEConv(in_channels, out_channels)
        elif self.conv_type == 'cheb':
            self.conv = ChebConv(in_channels, out_channels, K=2)
        elif self.conv_type == 'tag':
            self.conv = TAGConv(in_channels, out_channels)
        elif self.conv_type == 'arma':
            self.conv = ARMAConv(in_channels, out_channels)
        elif self.conv_type == 'gin':
            self.conv = GINConv(nn.Sequential(nn.Linear(in_channels, out_channels), nn.ReLU(), nn.Linear(out_channels, out_channels)))
        elif self.conv_type == 'appnp':
            self.conv = LinearConv(in_channels, out_channels)

        self.conv.aggr = aggr_type
Example #17
    def __init__(self, features_num, num_class, hidden, num_stacks, num_layers,
                 shared_weights, dropout, skip_dropout):
        super(ARMA_Net, self).__init__()
        self.dropout = dropout
        self.conv1 = ARMAConv(features_num,
                              hidden,
                              num_stacks,
                              num_layers,
                              shared_weights,
                              dropout=skip_dropout)
        self.conv2 = ARMAConv(hidden,
                              num_class,
                              num_stacks,
                              num_layers,
                              shared_weights,
                              dropout=skip_dropout)
Example #18
File: gian.py Project: zhihy96/Multi-GNN
    def __init__(self, n_features, n_outputs, dim=100):
        super(GIAN, self).__init__()
        
        # the graph convolution layers
        nn1 = Seq(Linear(n_features, 2*dim), ReLU(), Linear(2*dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)
        self.conv2 = ARMAConv(dim, 2*dim)
        self.conv3 = ARMAConv(2*dim, dim)
        
        
        # the Fully Connected Layer
        self.fc1 = Linear(dim, 2*dim)
        self.fc2 = Linear(2*dim, 3*dim)
        self.fc3 = Linear(3*dim, 2*dim)
        self.fc4 = Linear(2*dim, 1)
Example #19
    def __init__(self, num_features, num_classes):
        super(NodeARMA, self).__init__()

        self.conv1 = ARMAConv(num_features,
                              16,
                              num_stacks=3,
                              num_layers=2,
                              shared_weights=True,
                              dropout=0.25)

        self.conv2 = ARMAConv(16,
                              num_classes,
                              num_stacks=3,
                              num_layers=2,
                              shared_weights=True,
                              dropout=0.25,
                              act=lambda x: x)
Example #20
    def __init__(self, num_features, n_classes, num_hidden, num_hidden_layers, activation,
                 dropout=0, num_stacks=1, num_layers=1, shared_weights=False, bias=True):
        super(PARMA, self).__init__()
        # dropout
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # activation
        self.activation = activation
        # input layer
        self.conv_input = ARMAConv(num_features, num_hidden, num_stacks=num_stacks,
                                   num_layers=num_layers, shared_weights=shared_weights, bias=bias)
        # hidden layers
        self.layers = nn.ModuleList()
        for _ in range(num_hidden_layers):
            self.layers.append(ARMAConv(num_hidden, num_hidden, num_stacks=num_stacks,
                                        num_layers=num_layers, shared_weights=shared_weights, bias=bias))
        # output layer
        self.conv_output = ARMAConv(num_hidden, n_classes, num_stacks=num_stacks,
                                    num_layers=num_layers, shared_weights=shared_weights, bias=bias)
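
A forward pass is not shown for PARMA. Below is a minimal sketch consistent with the input / hidden / output layout and the stored self.activation and self.dropout modules (the method signature is an assumption, not from the original project):

    def forward(self, x, edge_index):
        # Sketch: input conv, hidden convs with activation + dropout, raw output conv.
        x = self.dropout(self.activation(self.conv_input(x, edge_index)))
        for layer in self.layers:
            x = self.dropout(self.activation(layer(x, edge_index)))
        return self.conv_output(x, edge_index)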
Example #21
    def __init__(self, cur_dim, hidden_dim, output_dim, multi_head):
        super(NasPhy10000Cell, self).__init__()
        self._cur_dim = cur_dim
        self._hidden_dim = hidden_dim
        self._output_dim = output_dim

        self.preprocessor = nn.Linear(cur_dim, hidden_dim)
        self.linear = nn.Linear(hidden_dim, output_dim)
        self.arma = ARMAConv(output_dim, output_dim)
Example #22
    def __init__(self, cur_dim, hidden_dim, output_dim, multi_head):
        super(NasAutoGraphBCell, self).__init__()
        self._cur_dim = cur_dim
        self._hidden_dim = hidden_dim
        self._output_dim = output_dim

        self.preprocessor = nn.Linear(cur_dim, hidden_dim)
        self.arma = ARMAConv(hidden_dim, output_dim)
        self.sage = SAGEConv(hidden_dim, self._output_dim, bias=True)
Example #23
def test_arma_conv():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = ARMAConv(16, 32, num_stacks=8, num_layers=4)
    assert conv.__repr__() == 'ARMAConv(16, 32, num_stacks=8, num_layers=4)'
    out = conv(x, edge_index)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x, adj.t()), out, atol=1e-6)

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj.t()), out, atol=1e-6)
Example #24
File: utils.py Project: CoMorvan/GraphNN
    def __init__(self):
        super(ARMA, self).__init__()

        self.conv1 = ARMAConv(75,
                              16,
                              num_stacks=3,
                              num_layers=2,
                              shared_weights=True,
                              dropout=0.25)

        self.conv2 = ARMAConv(16,
                              64,
                              num_stacks=3,
                              num_layers=2,
                              shared_weights=True,
                              dropout=0.25,
                              act=None)

        self.gather_layer = nn.Linear(64, 1)
Example #25
    def __init__(self, his_dim, cur_dim, hidden_dim, output_dim, multi_head):
        super(NasAzcsCell, self).__init__()
        self._cur_dim = cur_dim
        self._hidden_dim = hidden_dim
        self._output_dim = output_dim

        self.preprocessor_x = nn.Linear(cur_dim, hidden_dim)
        self.headers = 2 if multi_head else 1
        self.sg = SAGEConv(hidden_dim, output_dim)
        self.arma = ARMAConv(hidden_dim, output_dim)
Example #26
    def __init__(self, dataset, embedding_layer, hidden_dim=cmd_args.hidden_dim):
        super().__init__()

        self.embedding_layer = embedding_layer
        self.edge_offset = dataset.attr_encoder.edge_offset
        self.conv = ARMAConv(hidden_dim, hidden_dim, num_layers=4)

        self.lin1 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.lin2 = torch.nn.Linear(hidden_dim, int(hidden_dim/2))
        self.lin3 = torch.nn.Linear(int(hidden_dim/2), cmd_args.embedding_dim)
Example #27
    def __init__(self, in_feats, hid_feats, out_feats):
        super(BUrumorARMA, self).__init__()
        self.conv1 = ARMAConv(in_feats,
                              hid_feats,
                              num_stacks=1,
                              num_layers=1,
                              shared_weights=False,
                              act=F.relu,
                              dropout=0,
                              bias=True)

        self.conv2 = ARMAConv(hid_feats + in_feats,
                              out_feats,
                              num_stacks=1,
                              num_layers=1,
                              shared_weights=False,
                              act=F.relu,
                              dropout=0,
                              bias=True)
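
conv2 expects hid_feats + in_feats input channels, which suggests that the first layer's output is concatenated with the original node features (or a copy of the root-post features, as in BiGCN-style rumor models) before the second convolution. The sketch below illustrates that reading under the simpler assumption of reusing the raw node features; the method signature and the `global_mean_pool` readout (from torch_geometric.nn) are assumptions:

    def forward(self, data):
        # Sketch only: concatenate the raw features onto the conv1 output so conv2 sees
        # hid_feats + in_feats channels, then pool node embeddings per graph.
        x, edge_index = data.x, data.edge_index
        h = self.conv1(x, edge_index)
        h = torch.cat([h, x], dim=1)
        h = self.conv2(h, edge_index)
        return global_mean_pool(h, data.batch)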
Example #28
    def __init__(self, his_dim, cur_dim, hidden_dim, output_dim, multi_head):
        super(NasAutoGraphDCell, self).__init__()
        self._cur_dim = cur_dim
        self._hidden_dim = hidden_dim
        self._output_dim = output_dim

        self.preprocessor_h = nn.Linear(his_dim, hidden_dim)
        self.preprocessor_x = nn.Linear(cur_dim, hidden_dim)
        self.sg = SGConv(hidden_dim, output_dim)
        self.arma = ARMAConv(hidden_dim, output_dim)
Example #29
    def __init__(self, his_dim, cur_dim, hidden_dim, output_dim, multi_head):
        super(NasPubmedCell, self).__init__()
        self._cur_dim = cur_dim
        self._hidden_dim = hidden_dim
        self._output_dim = output_dim

        self.preprocessor_h = nn.Linear(his_dim, hidden_dim)
        self.preprocessor_x = nn.Linear(cur_dim, hidden_dim)
        self.headers = 8 if multi_head else 1
        self.gat8 = GATConv(hidden_dim, output_dim, heads=self.headers)
        self.arma = ARMAConv(hidden_dim, output_dim)
Example #30
    def __init__(self, his_dim, cur_dim, hidden_dim, output_dim, multi_head):
        super(NasAzpoCell, self).__init__()
        self._cur_dim = cur_dim
        self._hidden_dim = hidden_dim
        self._output_dim = output_dim

        self.preprocessor_h = nn.Linear(his_dim, hidden_dim)
        self.preprocessor_x = nn.Linear(cur_dim, hidden_dim)
        self.cheb = ChebConv(hidden_dim, output_dim, K=2, bias=True)
        self.arma = ARMAConv(hidden_dim, output_dim)
        self.linear = nn.Linear(output_dim, output_dim)