# Test for PPFConv (the "+ 4" in the local network's input size comes from
# the 4-dimensional point-pair feature appended to each neighbor's features).
import torch
from torch.nn import Linear as Lin, ReLU, Sequential as Seq
from torch_geometric.nn import PPFConv


def test_point_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    pos = torch.rand((num_nodes, 3))
    norm = torch.nn.functional.normalize(torch.rand((num_nodes, 3)), dim=1)

    local_nn = Seq(Lin(in_channels + 4, 32), ReLU(), Lin(32, out_channels))
    global_nn = Seq(Lin(out_channels, out_channels))
    conv = PPFConv(local_nn, global_nn)
    assert conv.__repr__() == (
        'PPFConv(local_nn=Sequential(\n'
        '  (0): Linear(in_features=20, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '), global_nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    out = conv(x, pos, norm, edge_index)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, pos=pos, norm=norm, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, pos, norm, edge_index).tolist() == out.tolist()
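# A minimal sketch of that 4-dimensional point-pair feature, mirroring the
# definition PPFConv applies to each edge (i, j): the pair distance plus
# three angles. The helper name `point_pair_feature` is ours, for
# illustration only; it is not part of the library API.
def point_pair_feature(pos_i, pos_j, n_i, n_j):
    d = pos_j - pos_i  # relative position, shape [num_edges, 3]

    def angle(v1, v2):
        return torch.atan2(
            torch.cross(v1, v2, dim=-1).norm(p=2, dim=-1),
            (v1 * v2).sum(dim=-1))

    return torch.stack([
        d.norm(p=2, dim=-1),  # ||d||
        angle(n_i, d),        # angle between source normal and d
        angle(n_j, d),        # angle between target normal and d
        angle(n_i, n_j),      # angle between the two normals
    ], dim=-1)                # shape [num_edges, 4]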
# Assumes: import torch.nn as nn, plus FeaStConv, SplineConv, GMMConv,
# NNConv, PPFConv and CGConv from torch_geometric.nn; `self.edge_dim` is
# the dimensionality of the edge features.
def makeConv(self, nin, nout, conv_args):
    # FeaStConv
    if conv_args['name'] == 'FeaSt':
        return FeaStConv(nin, nout, conv_args['num_heads'])

    # SplineConv
    if conv_args['name'] == 'Spline':
        return SplineConv(nin, nout, self.edge_dim,
                          conv_args['kernel_size'],
                          is_open_spline=conv_args['open_spline'],
                          degree=conv_args['degree'])

    # GMMConv
    if conv_args['name'] == 'GMM':
        return GMMConv(nin, nout, self.edge_dim, conv_args['kernel_size'])

    # NNConv: maps each edge feature to an (nin x nout) weight matrix.
    if conv_args['name'] == 'NN':
        h = nn.Sequential(
            nn.Linear(self.edge_dim, nin * nout),
            nn.ReLU(),
        )
        return NNConv(nin, nout, h)

    # PPFConv: the local network takes nin + 4 inputs, since a
    # 4-dimensional point-pair feature is appended to each neighbor.
    if conv_args['name'] == 'PPF':
        cin = nin + 4
        hl = nn.Sequential(
            nn.Linear(cin, conv_args['nhidden']),
            nn.ReLU(),
        )
        hg = nn.Linear(conv_args['nhidden'], nout)
        return PPFConv(hl, hg)

    # CGConv
    if conv_args['name'] == 'CG':
        return CGConv(nin, self.edge_dim)
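# Illustrative conv_args dicts for makeConv above; the keys are exactly the
# ones the function reads, while the values are arbitrary examples:
#
#   {'name': 'FeaSt',  'num_heads': 4}
#   {'name': 'Spline', 'kernel_size': 5, 'open_spline': True, 'degree': 1}
#   {'name': 'GMM',    'kernel_size': 5}
#   {'name': 'NN'}
#   {'name': 'PPF',    'nhidden': 64}
#   {'name': 'CG'}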
# Test for PPFConv covering dense and sparse adjacency, TorchScript, and
# bipartite (source/target) inputs.
import torch
import torch.nn.functional as F
from torch.nn import Linear as Lin, ReLU, Sequential as Seq
from torch_sparse import SparseTensor
from torch_geometric.nn import PPFConv


def test_ppf_conv():
    x1 = torch.randn(4, 16)
    pos1 = torch.randn(4, 3)
    pos2 = torch.randn(2, 3)
    n1 = F.normalize(torch.rand(4, 3), dim=-1)
    n2 = F.normalize(torch.rand(2, 3), dim=-1)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    local_nn = Seq(Lin(16 + 4, 32), ReLU(), Lin(32, 32))
    global_nn = Seq(Lin(32, 32))
    conv = PPFConv(local_nn, global_nn)
    assert conv.__repr__() == (
        'PPFConv(local_nn=Sequential(\n'
        '  (0): Linear(in_features=20, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '), global_nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    out = conv(x1, pos1, n1, edge_index)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x1, pos1, n1, adj.t()), out, atol=1e-6)

    t = '(OptTensor, Tensor, Tensor, Tensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, pos1, n1, edge_index).tolist() == out.tolist()

    t = '(OptTensor, Tensor, Tensor, SparseTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x1, pos1, n1, adj.t()), out, atol=1e-6)

    adj = adj.sparse_resize((4, 2))
    out = conv(x1, (pos1, pos2), (n1, n2), edge_index)
    assert out.size() == (2, 32)
    assert conv((x1, None), (pos1, pos2), (n1, n2),
                edge_index).tolist() == out.tolist()
    assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj.t()), out,
                          atol=1e-6)
    assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), adj.t()),
                          out, atol=1e-6)

    t = '(PairOptTensor, PairTensor, PairTensor, Tensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, None), (pos1, pos2), (n1, n2),
               edge_index).tolist() == out.tolist()

    t = '(PairOptTensor, PairTensor, PairTensor, SparseTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit((x1, None), (pos1, pos2), (n1, n2), adj.t()),
                          out, atol=1e-6)
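# A hedged sketch of where the bipartite call pattern above comes from in
# practice: sample target points with farthest point sampling, connect each
# target to nearby source points, then run PPFConv over the (source, target)
# pair. Single graph, no batch vector; all sizes here are illustrative.
import torch
import torch.nn.functional as F
from torch.nn import Linear as Lin, ReLU, Sequential as Seq
from torch_geometric.nn import PPFConv, fps, radius

pos = torch.randn(100, 3)
norm = F.normalize(torch.randn(100, 3), dim=-1)
x = torch.randn(100, 16)

idx = fps(pos, ratio=0.25)                   # indices of 25 target points
row, col = radius(pos, pos[idx], r=0.5, max_num_neighbors=16)
edge_index = torch.stack([col, row], dim=0)  # edges: source -> target

conv = PPFConv(Seq(Lin(16 + 4, 32), ReLU(), Lin(32, 32)))
out = conv(x, (pos, pos[idx]), (norm, norm[idx]), edge_index)
assert out.size() == (idx.numel(), 32)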
# Assumes: MLP, plus PointConv, GraphConv and PPFConv from torch_geometric.nn.
def __init__(self, nIn, nOut, conv_args, ratio, radius, max_neighbors=64):
    """This module acts as a pooling/conv layer.
    Taken from the pytorch-geometric examples."""
    super(SAModule, self).__init__()
    self.ratio = ratio
    self.r = radius
    self.K = max_neighbors

    # Set up the convolution operator.
    self.conv_name = conv_args['name']
    if conv_args['name'] == 'PointConv':
        mlp = MLP([nIn + 3, (nIn + 3 + nOut) // 2, nOut])
        self.conv = PointConv(mlp)
    elif conv_args['name'] == 'GraphConv':
        self.conv = GraphConv(nIn, nOut, aggr=conv_args['aggr'])
    elif conv_args['name'] == 'PPFConv':
        mlp = MLP([nIn + 4, (nIn + 4 + nOut) // 2, nOut])
        self.conv = PPFConv(mlp)
        self.conv.aggr = conv_args['aggr']
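# A sketch of the forward pass that usually pairs with this module in the
# pytorch-geometric PointNet++ example (fps and radius come from
# torch_geometric.nn); the `norm` argument is our addition, assuming
# normals are carried alongside positions for the PPFConv branch:
def forward(self, x, pos, norm, batch):
    idx = fps(pos, batch, ratio=self.ratio)
    row, col = radius(pos, pos[idx], self.r, batch, batch[idx],
                      max_num_neighbors=self.K)
    edge_index = torch.stack([col, row], dim=0)  # source -> target edges
    if self.conv_name == 'PPFConv':
        x = self.conv(x, (pos, pos[idx]), (norm, norm[idx]), edge_index)
    elif self.conv_name == 'PointConv':
        x = self.conv(x, (pos, pos[idx]), edge_index)
    else:  # GraphConv consumes node features only
        x = self.conv((x, x[idx]), edge_index)
    pos, norm, batch = pos[idx], norm[idx], batch[idx]
    return x, pos, norm, batch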
# Minimal PPF variant of SAModule; `nn` here is the local network handed
# straight to PPFConv.
def __init__(self, ratio, r, nn):
    super(SAModule, self).__init__()
    self.ratio = ratio
    self.r = r
    self.conv = PPFConv(nn)
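# Instantiation sketch (values illustrative): when no node features are
# passed, the PPFConv message reduces to the 4-dimensional point-pair
# feature, so the first Linear of `nn` takes 4 inputs; with node features
# it takes channels + 4.
from torch.nn import Linear as Lin, ReLU, Sequential as Seq

module = SAModule(ratio=0.5, r=0.2,
                  nn=Seq(Lin(4, 64), ReLU(), Lin(64, 128)))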