Example #1
import torch.nn as nn
# import path for the PAConv ops may differ across mmdet3d versions
from mmdet3d.ops import PAConv, PAConvCUDA


class ToyModel(nn.Module):

    def __init__(self):
        super(ToyModel, self).__init__()

        self.paconvs = nn.ModuleList()
        self.paconvs.append(PAConv(8, 16, 8))
        self.paconvs.append(PAConv(8, 16, 8, kernel_input='identity'))
        self.paconvs.append(PAConvCUDA(8, 16, 8))

        self.conv1 = nn.Conv1d(3, 8, 1)
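The fragment above only constructs the layers. Below is a minimal usage sketch for the first (non-CUDA) PAConv entry, under the assumption that its forward consumes an already-grouped (features, grouped_xyz) tuple and returns a tuple whose first element is the new grouped features, analogous to the PAConvCUDA call shown in Example #3; the tuple interface and shapes here are assumptions, not confirmed by this excerpt.

import torch

# hedged sketch: exercising the first PAConv layer of ToyModel
# assumption: PAConv.forward takes ((grouped_features, grouped_xyz)) and
# returns (new_grouped_features, grouped_xyz)
toy_model = ToyModel()
B, npoint, K = 2, 4, 3
grouped_xyz = torch.randn(B, 3, npoint, K)    # grouped neighbor coordinates
grouped_feats = torch.randn(B, 8, npoint, K)  # 8 = in_channels of paconvs[0]
with torch.no_grad():
    new_feats, _ = toy_model.paconvs[0]((grouped_feats, grouped_xyz))
print(new_feats.shape)  # expected: torch.Size([2, 16, 4, 3])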
Example #2
import pytest
import torch
# import path for PAConvCUDA may differ across mmdet3d versions
from mmdet3d.ops import PAConvCUDA


def test_paconv_cuda():
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and CUDA')
    B = 2
    in_channels = 6
    out_channels = 12
    N = 32
    npoint = 4
    K = 3
    points_xyz = torch.randn(B, 3, npoint, K).float().cuda()
    features = torch.randn(B, in_channels, N).float().cuda()
    points_idx = torch.randint(0, N, (B, npoint, K)).long().cuda()

    paconv = PAConvCUDA(in_channels, out_channels, 4).cuda()

    with torch.no_grad():
        new_features = paconv(points_xyz, features, points_idx)

    assert new_features.shape == torch.Size([B, out_channels, npoint, K])
Example #3
import pytest
import torch
# import path for PAConvCUDA may differ across mmdet3d versions
from mmdet3d.ops import PAConvCUDA


def test_paconv_cuda():
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and CUDA')
    B = 2
    in_channels = 6
    out_channels = 12
    N = 32
    npoint = 4
    K = 3
    num_kernels = 4
    points_xyz = torch.randn(B, 3, npoint, K).float().cuda()
    features = torch.randn(B, in_channels, N).float().cuda()
    points_idx = torch.randint(0, N, (B, npoint, K)).long().cuda()

    paconv = PAConvCUDA(in_channels, out_channels, num_kernels).cuda()
    assert paconv.weight_bank.shape == torch.Size(
        [in_channels * 2, out_channels * num_kernels])

    with torch.no_grad():
        new_features, _, _ = paconv((features, points_xyz, points_idx))

    assert new_features.shape == torch.Size([B, out_channels, npoint, K])
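The weight_bank assertion in Example #3 reflects the default kernel_input='w_neighbor' setting, where each neighbor's feature is presumably concatenated with its offset from the center feature, doubling the per-kernel input width, while out_channels is multiplied by the number of weight kernels. Below is a small bookkeeping sketch of that sizing; the treatment of 'identity' (input width unchanged) is an assumption suggested by the kernel_input='identity' branch in Example #1.

# hedged sketch of the weight-bank sizing implied by the assertion above
def expected_weight_bank_shape(in_channels, out_channels, num_kernels,
                               kernel_input='w_neighbor'):
    # 'w_neighbor': neighbor features are assumed to be concatenated with
    # their offsets from the center feature, doubling the input width;
    # 'identity' (see Example #1) is assumed to keep in_channels unchanged
    mult = 2 if kernel_input == 'w_neighbor' else 1
    return (in_channels * mult, out_channels * num_kernels)

assert expected_weight_bank_shape(6, 12, 4) == (12, 48)  # matches Example #3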

class PAConvCUDASAModuleMSG(BasePointSAModule):

    def __init__(self,
                 num_point,
                 radii,
                 sample_nums,
                 mlp_channels,
                 paconv_num_kernels,
                 fps_mod=['D-FPS'],
                 fps_sample_range_list=[-1],
                 dilated_group=False,
                 norm_cfg=dict(type='BN2d', momentum=0.1),
                 use_xyz=True,
                 pool_mod='max',
                 normalize_xyz=False,
                 bias='auto',
                 paconv_kernel_input='w_neighbor',
                 scorenet_input='w_neighbor_dist',
                 scorenet_cfg=dict(mlp_channels=[8, 16, 16],
                                   score_norm='softmax',
                                   temp_factor=1.0,
                                   last_bn=False)):
        super(PAConvCUDASAModuleMSG,
              self).__init__(num_point=num_point,
                             radii=radii,
                             sample_nums=sample_nums,
                             mlp_channels=mlp_channels,
                             fps_mod=fps_mod,
                             fps_sample_range_list=fps_sample_range_list,
                             dilated_group=dilated_group,
                             use_xyz=use_xyz,
                             pool_mod=pool_mod,
                             normalize_xyz=normalize_xyz,
                             grouper_return_grouped_xyz=True,
                             grouper_return_grouped_idx=True)

        assert len(paconv_num_kernels) == len(mlp_channels)
        for i in range(len(mlp_channels)):
            assert len(paconv_num_kernels[i]) == len(mlp_channels[i]) - 1, \
                'PAConv number of weight kernels wrong'

        # in PAConv, bias only exists in ScoreNet
        scorenet_cfg['bias'] = bias

        # we need to manually concat xyz for the CUDA-implemented PAConv
        self.use_xyz = use_xyz

        for i in range(len(self.mlp_channels)):
            mlp_channel = self.mlp_channels[i]
            if use_xyz:
                mlp_channel[0] += 3

            num_kernels = paconv_num_kernels[i]

            # can't use `nn.Sequential` for PAConvCUDA because its input and
            # output have different shapes
            mlp = nn.ModuleList()
            for j in range(len(mlp_channel) - 1):
                mlp.append(
                    PAConvCUDA(mlp_channel[j],
                               mlp_channel[j + 1],
                               num_kernels[j],
                               norm_cfg=norm_cfg,
                               kernel_input=paconv_kernel_input,
                               scorenet_input=scorenet_input,
                               scorenet_cfg=scorenet_cfg))
            self.mlps.append(mlp)
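
Because each PAConvCUDA layer maps per-point features of shape (B, C_in, N) to grouped features of shape (B, C_out, npoint, K), as seen in Example #3, the layers in the ModuleList above cannot simply be chained with nn.Sequential. Below is a hedged sketch of how such a ModuleList could be applied, pooling over the neighbor dimension after every layer to recover a per-point tensor; it is a simplified stand-in (it assumes npoint == N so the same grouped_idx stays valid), not the module's actual forward.

# hypothetical helper, not the library's forward
def apply_paconv_cuda_mlp(mlp, features, grouped_xyz, grouped_idx):
    # features: (B, C_in, N); grouped_xyz: (B, 3, npoint, K);
    # grouped_idx: (B, npoint, K) neighbor indices into the N points
    new_features = features
    for layer in mlp:
        # each PAConvCUDA returns grouped features of shape (B, C_out, npoint, K)
        grouped, _, _ = layer((new_features, grouped_xyz, grouped_idx))
        # max-pool over the K neighbors to get back a per-point feature map;
        # assumes npoint == N so grouped_idx can be reused by the next layer
        new_features = grouped.max(dim=-1)[0]
    return new_features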