Example #1
    def __init__(self,
                 num_layers=2,
                 hidden=16,
                 features_num=16,
                 num_class=2,
                 droprate=0.5,
                 dim=1,
                 kernel_size=2,
                 edge_droprate=0.0,
                 fea_norm="no_norm",
                 K=20,
                 alpha=0.5):
        super(SplineGCN, self).__init__()
        self.droprate = droprate
        self.edge_droprate = edge_droprate
        if fea_norm == "no_norm":
            self.fea_norm_layer = None
        elif fea_norm == "graph_size_norm":
            self.fea_norm_layer = GraphSizeNorm()
        else:
            raise ValueError("your fea_norm is un-defined: %s") % fea_norm

        self.convs = torch.nn.ModuleList()
        self.convs.append(SplineConv(features_num, hidden, dim, kernel_size))
        for _ in range(num_layers - 2):
            self.convs.append(SplineConv(hidden, hidden, dim, kernel_size))
        self.convs.append(SplineConv(hidden, num_class, dim, kernel_size))

        self.appnp = APPNP(K, alpha)
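
For context, a forward pass matching this constructor could look like the following. This is a sketch, not taken from the source: it assumes a PyG data object carrying x, edge_index and edge_attr, torch.nn.functional imported as F, and dropout_adj from torch_geometric.utils.

    def forward(self, data):
        # optionally drop edges before message passing
        if self.edge_droprate > 0:
            edge_index, edge_attr = dropout_adj(
                data.edge_index, data.edge_attr, p=self.edge_droprate)
        else:
            edge_index, edge_attr = data.edge_index, data.edge_attr
        x = data.x
        if self.fea_norm_layer is not None:
            x = self.fea_norm_layer(x)
        for conv in self.convs[:-1]:
            x = F.relu(conv(x, edge_index, edge_attr))
            x = F.dropout(x, p=self.droprate, training=self.training)
        x = self.convs[-1](x, edge_index, edge_attr)
        x = self.appnp(x, edge_index)  # APPNP propagation over the logits
        return F.log_softmax(x, dim=-1)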
Example #2
    def __init__(self, num_features, num_classes, training_method='dfa'):
        super(DFASplineNet, self).__init__()
        self.conv1 = SplineConv(num_features, 16, dim=1, kernel_size=2)
        self.dfa_1 = DFALayer()
        self.conv2 = SplineConv(16, num_classes, dim=1, kernel_size=2)

        self.dfa = DFA(dfa_layers=[self.dfa_1], no_training=training_method != 'dfa')
Example #3
 def __init__(self, prune=False):
     super(Net, self).__init__()
     self.conv1 = SplineConv(dataset.num_features, 16, dim=1, kernel_size=2)
     self.conv2 = SplineConv(16, dataset.num_classes, dim=1, kernel_size=2)
     self.prune = prune
     if prune == 'data':
         self.prunedata()
Example #4
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = SplineConv(1, 32, dim=2, kernel_size=5)
     self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5)
     self.conv3 = SplineConv(64, 64, dim=2, kernel_size=5)
     self.fc1 = torch.nn.Linear(4 * 64, 128)
     self.fc2 = torch.nn.Linear(128, 10)
Example #5
    def layers(self):
        # TODO adapt to per-layer configurability
        self.layers_list = torch.nn.ModuleList()

        conv_in = SplineConv(in_channels=self.config.feature_dimensionality,
                             out_channels=self.config.hidden_units,
                             dim=self.config.pseudo_dimensionality,
                             kernel_size=self.config.kernel_size,
                             norm=False,
                             root_weight=False,
                             bias=self.config.use_bias)

        self.layers_list.append(conv_in)

        for _ in range(self.config.hidden_layers):
            conv_hidden = SplineConv(in_channels=self.config.hidden_units,
                                     out_channels=self.config.hidden_units,
                                     dim=self.config.pseudo_dimensionality,
                                     kernel_size=self.config.kernel_size,
                                     norm=False,
                                     root_weight=False,
                                     bias=self.config.use_bias)
            self.layers_list.append(conv_hidden)

        conv_out = SplineConv(in_channels=self.config.hidden_units,
                              out_channels=self.model_type.out_channels,
                              dim=self.config.pseudo_dimensionality,
                              kernel_size=self.config.kernel_size,
                              norm=False,
                              root_weight=False,
                              bias=self.config.use_bias)

        self.layers_list.append(conv_out)
Example #6
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = SplineConv(d.num_features, 32, dim=2, kernel_size=5)
     self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5)
     self.conv3 = SplineConv(64, 64, dim=2, kernel_size=5)
     self.fc1 = torch.nn.Linear(4 * 64, 128)
     self.fc2 = torch.nn.Linear(128, d.num_classes)
Example #7
 def __init__(self,
              num_in_features,
              num_outp_features,
              mid_features,
              kernel=3,
              dim=3,
              batchnorm1=True):
     super(SplineBlock, self).__init__()
     self.batchnorm1 = batchnorm1
     self.conv1 = SplineConv(num_in_features,
                             mid_features,
                             dim,
                             kernel,
                             is_open_spline=False)
     if self.batchnorm1:
         # note: the boolean flag is deliberately overwritten with the module
         self.batchnorm1 = torch.nn.BatchNorm1d(mid_features)
     self.conv2 = SplineConv(mid_features,
                             2 * mid_features,
                             dim,
                             kernel,
                             is_open_spline=False)
     self.batchnorm2 = torch.nn.BatchNorm1d(2 * mid_features)
     self.conv3 = SplineConv(2 * mid_features + 3,
                             num_outp_features,
                             dim,
                             kernel,
                             is_open_spline=False)
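
The 2 * mid_features + 3 input width of conv3 suggests that 3-D node coordinates are concatenated back in before the last convolution. A sketch along those lines (an assumption, not from the source; pos is taken to be a 3-column coordinate tensor and F is torch.nn.functional):

 def forward(self, x, edge_index, pseudo, pos):
     x = F.elu(self.conv1(x, edge_index, pseudo))
     if self.batchnorm1:
         x = self.batchnorm1(x)
     x = F.elu(self.conv2(x, edge_index, pseudo))
     x = self.batchnorm2(x)
     # re-attach the 3 coordinate channels, matching conv3's input width
     x = torch.cat([x, pos], dim=1)
     return self.conv3(x, edge_index, pseudo)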
Example #8
    def __init__(self, input_size, output_size):
        super(GCNLayer, self).__init__()

        if input_size != output_size:
            raise AttributeError('input size must equal output size')

        self.conv1 = SplineConv(input_size, output_size, dim=1, kernel_size=2).to(device)
        self.conv2 = SplineConv(input_size, output_size, dim=1, kernel_size=2).to(device)
Example #9
    def __init__(self, numFeatures, numClasses):

        super().__init__()

        self.conv1 = SplineConv(numFeatures, 32, 3, 5)
        self.conv2 = SplineConv(32, 64, 3, 5)
        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)
Example #10
    def __init__(self):
        super(Net, self).__init__()

        self.func = GCNLayer(input_size=64, output_size=64)

        self.conv1 = SplineConv(dataset.num_features, 64, dim=1, kernel_size=2).to(device)
        self.neuralDE = NeuralDE(self.func, solver='rk4', s_span=torch.linspace(0, 1, 3)).to(device)
        self.conv2 = SplineConv(64, dataset.num_classes, dim=1, kernel_size=2).to(device)
Example #11
def test_spline_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    pseudo = torch.rand((edge_index.size(1), 3))

    conv = SplineConv(in_channels, out_channels, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv(16, 32)'
    assert conv(x, edge_index, pseudo).size() == (num_nodes, out_channels)
Example #12
    def __init__(self, num_classes):
        super().__init__()

        self.conv1 = SplineConv(1, 64, dim=3, kernel_size=5)
        self.conv2 = SplineConv(64, 64, dim=3, kernel_size=5)
        self.conv3 = SplineConv(64, 128, dim=3, kernel_size=5)

        self.lin1 = Lin(128, 256)
        self.lin2 = Lin(256, 256)
        self.lin3 = Lin(256, num_classes)
Example #13
 def __init__(self, datasetroot, width):
     super(SPlineNet, self).__init__()
     self.conv1 = SplineConv(datasetroot.num_features,
                             32,
                             dim=2,
                             kernel_size=5)
     self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5)
     self.lin1 = torch.nn.Linear(64, width[0])
     self.lin2 = torch.nn.Linear(width[0], width[1])
     self.lin3 = torch.nn.Linear(width[1], datasetroot.num_classes)
Example #14
 def __init__(self, num_features, num_classes, width):
     super(SplineNet, self).__init__()
     self.NumLayers = len(width)
     self.layers = nn.ModuleList()
     self.layers.append(SplineConv(num_features, width[0], dim=1, kernel_size=2))
     for i in range(self.NumLayers - 1):
         layer = SplineConv(width[i], width[i + 1], dim=1, kernel_size=2)
         nn.init.xavier_uniform_(layer.weight)
         self.layers.append(layer)
     self.layers.append(SplineConv(width[-1], num_classes, dim=1, kernel_size=2))
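
A typical way to consume such a ModuleList in forward (a sketch; the ReLU placement and the log_softmax head are illustrative assumptions, with F = torch.nn.functional):

 def forward(self, data):
     x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
     # every layer but the last is followed by a nonlinearity
     for layer in self.layers[:-1]:
         x = F.relu(layer(x, edge_index, edge_attr))
     x = self.layers[-1](x, edge_index, edge_attr)
     return F.log_softmax(x, dim=1)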
Example #15
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = SplineConv(1, 64, dim=2, kernel_size=5)
     self.bn1 = torch.nn.BatchNorm1d(64)
     self.conv2 = SplineConv(64, 128, dim=2, kernel_size=5)
     self.bn2 = torch.nn.BatchNorm1d(128)
     self.conv3 = SplineConv(128, 256, dim=2, kernel_size=5)
     self.bn3 = torch.nn.BatchNorm1d(256)
     self.conv4 = SplineConv(256, 512, dim=2, kernel_size=5)
     self.bn4 = torch.nn.BatchNorm1d(512)
     self.fc1 = torch.nn.Linear(64 * 512, 1024)
     self.fc2 = torch.nn.Linear(1024, 10)
Example #16
    def __init__(self, input_planes, planes, stride=1, dim_change=None):
        super(bottleNeck, self).__init__()

        self.conv1 = SplineConv(input_planes, planes, dim=2, kernel_size=1)
        self.bn1 = torch.nn.BatchNorm1d(planes)
        self.conv2 = SplineConv(planes, planes, dim=2, kernel_size=3)
        self.bn2 = torch.nn.BatchNorm1d(planes)
        self.conv3 = SplineConv(planes,
                                planes * self.expansion,
                                dim=2,
                                kernel_size=1)
        self.bn3 = torch.nn.BatchNorm1d(planes * self.expansion)
        self.dim_change = dim_change
Example #17
def test_spline_conv():
    warnings.filterwarnings('ignore', '.*non-optimized CPU version.*')

    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    conv = SplineConv(8, 32, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv(8, 32, dim=3)'
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out)
    assert torch.allclose(conv(x1, adj.t()), out)

    if is_full_test():
        t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x1, edge_index, value), out)
        assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out)

        t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x1, adj.t()), out)

    adj = adj.sparse_resize((4, 2))
    conv = SplineConv((8, 16), 32, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv((8, 16), 32, dim=3)'
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1)
    assert torch.allclose(conv((x1, x2), adj.t()), out1)
    assert torch.allclose(conv((x1, None), adj.t()), out2)

    if is_full_test():
        t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit((x1, x2), edge_index, value), out1)
        assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)),
                              out1)
        assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)),
                              out2)

        t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit((x1, x2), adj.t()), out1)
        assert torch.allclose(jit((x1, None), adj.t()), out2)
Example #18
 def __init__(self):
     super(Net, self).__init__()
     if model_name == 'GCN':
         self.conv1 = GCNConv(dataset_train.num_node_features, 16)
         self.conv2 = GCNConv(16, dataset_train.num_node_labels)
     elif model_name == 'Spline':
         self.conv1 = SplineConv(dataset_train.num_node_features,
                                 16,
                                 dim=1,
                                 kernel_size=2)
         self.conv2 = SplineConv(16,
                                 dataset_train.num_node_labels,
                                 dim=1,
                                 kernel_size=2)
Example #19
    def test_spline_conv(self):
        in_channels, out_channels = (16, 32)
        edge_index = torch.tensor([[0, 0, 0, 1, 2, 3],
                                   [1, 2, 3, 0, 0, 0]]).to(device)
        num_nodes = edge_index.max().item() + 1
        x = torch.randn((num_nodes, in_channels)).to(device)
        pseudo = torch.rand((edge_index.size(1), 3)).to(device)

        conv = SplineConv(in_channels, out_channels, dim=3,
                          kernel_size=5).to(device)
        self.assertEqual('SplineConv(16, 32)', conv.__repr__())
        with torch_geometric.debug():
            self.assertEqual((num_nodes, out_channels),
                             conv(x, edge_index, pseudo).size())
Example #20
def test_spline_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    pseudo = torch.rand((edge_index.size(1), 3))

    conv = SplineConv(in_channels, out_channels, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv(16, 32, dim=3)'
    out = conv(x, edge_index, pseudo)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, edge_index=edge_index, pseudo=pseudo)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index, pseudo).tolist() == out.tolist()
Example #21
    def __init__(self,
                 in_channels,
                 out_channels,
                 dim,
                 num_layers,
                 cat=True,
                 lin=True,
                 dropout=0.0):
        super(SplineCNN, self).__init__()

        self.in_channels = in_channels
        self.dim = dim
        self.num_layers = num_layers
        self.cat = cat
        self.lin = lin
        self.dropout = dropout

        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            conv = SplineConv(in_channels, out_channels, dim, kernel_size=5)
            self.convs.append(conv)
            in_channels = out_channels

        if self.cat:
            in_channels = self.in_channels + num_layers * out_channels
        else:
            in_channels = out_channels

        if self.lin:
            self.out_channels = out_channels
            self.final = Lin(in_channels, out_channels)
        else:
            self.out_channels = in_channels

        self.reset_parameters()
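
Given the cat and lin flags, the forward pass presumably keeps every intermediate representation and concatenates them jumping-knowledge style before the final linear layer. A hedged sketch of that pattern (assuming torch and F = torch.nn.functional are in scope):

    def forward(self, x, edge_index, edge_attr):
        xs = [x]  # keep the input plus every layer's output
        for conv in self.convs:
            xs += [F.relu(conv(xs[-1], edge_index, edge_attr))]
        x = torch.cat(xs, dim=-1) if self.cat else xs[-1]
        x = F.dropout(x, p=self.dropout, training=self.training)
        return self.final(x) if self.lin else x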
Example #22
def test_spline_conv():
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    conv = SplineConv(8, 32, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv(8, 32, dim=3)'
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index, value).tolist() == out.tolist()
    assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    conv = SplineConv((8, 16), 32, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv((8, 16), 32, dim=3)'
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index, value).tolist() == out1.tolist()
    assert jit((x1, x2), edge_index, value,
               size=(4, 2)).tolist() == out1.tolist()
    assert jit((x1, None), edge_index, value,
               size=(4, 2)).tolist() == out2.tolist()

    t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
    assert jit((x1, None), adj.t()).tolist() == out2.tolist()
Example #23
 def __init__(self, ratio, r, in_channels, out_channels, dim, kernel_size):
     super(Downsampling, self).__init__()
     self.ratio = ratio
     self.r = r
     self.conv = SplineConv(in_channels,
                            out_channels,
                            dim=dim,
                            kernel_size=kernel_size)
Example #24
    def __init__(self, D, C, G=0, task='graph'):
        super(SplineNet, self).__init__()

        self.D = D
        self.C = C
        self.G = G

        self.conv1 = SplineConv(self.D, self.D, dim=1, degree=1, kernel_size=3)
        self.conv2 = SplineConv(self.D, self.D, dim=1, degree=1, kernel_size=5)

        if self.G > 0:
            self.Z = self.D + self.G
        else:
            self.Z = self.D

        self.mlp1 = Linear(self.Z, self.Z)
        self.mlp2 = Linear(self.Z, self.C)
Example #25
    def __init__(self, in_channel, out_channel):
        super(ResidualBlock, self).__init__()
        self.left_conv1 = SplineConv(in_channel,
                                     out_channel,
                                     dim=2,
                                     kernel_size=5)
        self.left_bn1 = torch.nn.BatchNorm1d(out_channel)
        self.left_conv2 = SplineConv(out_channel,
                                     out_channel,
                                     dim=2,
                                     kernel_size=5)
        self.left_bn2 = torch.nn.BatchNorm1d(out_channel)

        self.shortcut_conv = SplineConv(in_channel,
                                        out_channel,
                                        dim=2,
                                        kernel_size=1)
        self.shortcut_bn = torch.nn.BatchNorm1d(out_channel)
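
The two branches are presumably summed before the activation, as in a standard residual block. A sketch of that forward pass (an assumption, not from the source; F is torch.nn.functional and pseudo holds the spline pseudo-coordinates):

    def forward(self, x, edge_index, pseudo):
        left = F.elu(self.left_bn1(self.left_conv1(x, edge_index, pseudo)))
        left = self.left_bn2(self.left_conv2(left, edge_index, pseudo))
        shortcut = self.shortcut_bn(self.shortcut_conv(x, edge_index, pseudo))
        return F.elu(left + shortcut)  # residual sum, then activation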
Example #26
 def __init__(self, filter_nr, l, k, activation, nr_points):
     super(DirectionalSplineConvNoF, self).__init__()
     self.nr_points = nr_points
     self.k = k
     self.l = l if l <= k else k
     self.activation = activation
     self.filter_nr = filter_nr
     self.conv = SplineConv(1, self.filter_nr, dim=3, kernel_size=15)
     self.bn = BatchNorm1d(self.filter_nr)
Example #27
    def __init__(self):
        super(GFCND, self).__init__()
        self.down1 = Downsampling(k_range=32,
                                  ratio=0.5,
                                  in_channels=1,
                                  out_channels=32,
                                  dim=2,
                                  kernel_size=5,
                                  batch_norm=False)
        self.down2 = Downsampling(k_range=64,
                                  ratio=0.5,
                                  in_channels=32,
                                  out_channels=64,
                                  dim=2,
                                  kernel_size=3)
        self.down3 = Downsampling(k_range=128,
                                  ratio=0.5,
                                  in_channels=64,
                                  out_channels=128,
                                  dim=2,
                                  kernel_size=3)
        self.up1 = Upsampling(k=3,
                              in_channels=128,
                              out_channels=64,
                              dim=2,
                              kernel_size=3)
        self.score_fs = SplineConv(64, 32, dim=2, kernel_size=3)

        self.up2 = Upsampling(k=3,
                              in_channels=32,
                              out_channels=32,
                              dim=2,
                              kernel_size=5,
                              conv_layer=False)
        self.up3 = Upsampling(k=3,
                              in_channels=32,
                              out_channels=32,
                              dim=2,
                              kernel_size=5,
                              conv_layer=False)

        self.score_pool2 = SplineConv(64, 32, dim=2, kernel_size=3)
        self.convout = SplineConv(32, 1, dim=2, kernel_size=5)
Example #28
    def __init__(self):
        super(GFCN, self).__init__()
        self.conv1a = SplineConv(1, 32, dim=2, kernel_size=5)
        self.conv1b = SplineConv(32, 32, dim=2, kernel_size=5)
        # self.bn1 = torch.nn.BatchNorm1d(32)

        self.conv2a = SplineConv(32, 64, dim=2, kernel_size=3)
        self.conv2b = SplineConv(64, 64, dim=2, kernel_size=3)
        self.bn2 = torch.nn.BatchNorm1d(64)

        self.conv3a = SplineConv(64, 128, dim=2, kernel_size=3)
        self.conv3b = SplineConv(128, 128, dim=2, kernel_size=1)
        self.bn3 = torch.nn.BatchNorm1d(128)

        self.score_fr1 = SplineConv(128, 64, dim=2, kernel_size=1)
        self.score_fr2 = SplineConv(64, 32, dim=2, kernel_size=1)
        self.score_pool2 = SplineConv(64, 32, dim=2, kernel_size=3)

        self.convout = SplineConv(32, 1, dim=2, kernel_size=5)
Example #29
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = SplineConv(1, 64, dim=2, kernel_size=5)
        self.bn1 = torch.nn.BatchNorm1d(64)

        self.block1 = ResidualBlock(64, 128)
        self.block2 = ResidualBlock(128, 256)
        self.block3 = ResidualBlock(256, 512)

        self.fc1 = torch.nn.Linear(64 * 512, 1024)
        self.fc2 = torch.nn.Linear(1024, 10)
Example #30
 def makeConv(self, nin, nout, conv_args):
     # FeaStConv
     if conv_args['name'] == 'FeaSt':
         return FeaStConv(nin, nout, conv_args["num_heads"])

     # SplineConv
     if conv_args['name'] == 'Spline':
         return SplineConv(nin, nout,
             self.edge_dim,
             conv_args["kernel_size"],
             is_open_spline=conv_args["open_spline"],
             degree=conv_args["degree"]
         )

     # GMMConv
     if conv_args['name'] == "GMM":
         return GMMConv(nin, nout,
             self.edge_dim,
             conv_args["kernel_size"]
         )

     # NNConv: edge features are mapped to an (nin x nout) weight per edge
     if conv_args["name"] == "NN":
         h = nn.Sequential(
             nn.Linear(self.edge_dim, nin * nout),
             nn.ReLU()
         )
         return NNConv(nin, nout, h)

     # PPFConv: input is augmented with the 4 point-pair features
     if conv_args["name"] == "PPF":
         cin = nin + 4
         hl = nn.Sequential(
             nn.Linear(cin, conv_args['nhidden']),
             nn.ReLU()
         )
         hg = nn.Linear(conv_args['nhidden'], nout)
         return PPFConv(hl, hg)

     # CGConv
     if conv_args["name"] == "CG":
         return CGConv(nin, self.edge_dim)
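
For illustration, the factory would be driven by a per-layer config dict whose keys mirror the branches above; the concrete values and the model variable here are hypothetical:

 # hypothetical usage of the factory above
 conv_args = {'name': 'Spline', 'kernel_size': 5,
              'open_spline': True, 'degree': 1}
 conv = model.makeConv(nin=32, nout=64, conv_args=conv_args)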