def test_module_conv3d(self, height, width, depth, in_channels, out_channels,
                       with_bias, octrees, lengths, coalescent_features,
                       max_level, pyramids, exsum, point_hierarchies,
                       kernel_vectors, jump, with_spc_to_dict):
    """Check the Conv3d module: registered parameters/buffers, repr, and
    that its forward output matches the functional ``spc.conv3d``."""
    conv = spc.Conv3d(in_channels, out_channels, kernel_vectors, jump,
                      bias=with_bias).cuda()

    # Parameter registration: weight always, bias only when requested.
    params = dict(conv.named_parameters())
    weight = params['weight']
    check_tensor(weight,
                 shape=(kernel_vectors.shape[0], in_channels, out_channels),
                 dtype=torch.float, device='cuda')
    if with_bias:
        assert len(params) == 2
        bias = params['bias']
        check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                     device='cuda')
    else:
        assert len(params) == 1
        bias = None

    # kernel_vectors must be registered as the module's single buffer.
    buffers = dict(conv.named_buffers())
    assert len(buffers) == 1
    assert torch.equal(buffers['kernel_vectors'], kernel_vectors)

    assert repr(conv) == f'Conv3d(in={in_channels}, out={out_channels}, ' \
                         f'kernel_vector_size={kernel_vectors.shape[0]})'

    # Forward pass: either through the Spc.to_dict() kwargs path or the
    # explicit positional path.
    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        output, output_level = conv(**input_spc.to_dict(), level=max_level,
                                    input=coalescent_features)
    else:
        output, output_level = conv(octrees, point_hierarchies, max_level,
                                    pyramids, exsum, coalescent_features)

    # The module must agree exactly with the functional API.
    expected_output, expected_output_level = spc.conv3d(
        octrees, point_hierarchies, max_level, pyramids, exsum,
        coalescent_features, weight, kernel_vectors, jump=jump, bias=bias)
    assert torch.equal(output, expected_output)
    assert output_level == expected_output_level
def test_to_dict_default(self, octrees, lengths, expected_max_level,
                         expected_pyramids, expected_exsum,
                         expected_point_hierarchies):
    """Spc.to_dict() with no argument exposes all six fields with the
    expected values."""
    structure = Spc(octrees, lengths)
    d = structure.to_dict()
    assert d.keys() == {'octrees', 'lengths', 'max_level', 'pyramids',
                        'exsum', 'point_hierarchies'}
    assert torch.equal(d['octrees'], octrees)
    assert torch.equal(d['lengths'], lengths)
    assert d['max_level'] == expected_max_level
    assert torch.equal(d['pyramids'], expected_pyramids)
    assert torch.equal(d['exsum'], expected_exsum)
    assert torch.equal(d['point_hierarchies'], expected_point_hierarchies)
def test_to_dict_with_keys(self, keys, octrees, lengths, expected_max_level,
                           expected_pyramids, expected_exsum,
                           expected_point_hierarchies):
    """Spc.to_dict(keys) returns exactly the requested subset of fields."""
    requested = set(keys)
    structure = Spc(octrees, lengths)
    d = structure.to_dict(requested)
    # Only the requested keys may appear.
    assert d.keys() == requested
    # Each present field must carry the expected value.
    if 'octrees' in requested:
        assert torch.equal(d['octrees'], octrees)
    if 'lengths' in requested:
        assert torch.equal(d['lengths'], lengths)
    if 'max_level' in requested:
        assert d['max_level'] == expected_max_level
    if 'pyramids' in requested:
        assert torch.equal(d['pyramids'], expected_pyramids)
    if 'exsum' in requested:
        assert torch.equal(d['exsum'], expected_exsum)
    if 'point_hierarchies' in requested:
        assert torch.equal(d['point_hierarchies'], expected_point_hierarchies)
def test_typo_to_dict_kwargs(self, octrees, lengths):
    """A misspelled kwarg passed alongside **to_dict() must raise TypeError.

    'anotherarg' is a deliberate typo (of ``another_arg``) so Python itself
    rejects the call to ``_test_func``.
    """
    input_spc = Spc(octrees, lengths)
    # pytest.raises(match=...) applies re.search, so the pattern must escape
    # the parentheses and include the quotes CPython puts in the message:
    #   _test_func() got an unexpected keyword argument 'anotherarg'
    # The previous unescaped, quote-less pattern could never match.
    with pytest.raises(
            TypeError,
            match=r"_test_func\(\) got an unexpected keyword argument "
                  r"'anotherarg'"):
        _test_func(**input_spc.to_dict(), anotherarg=1)
def test_to_dict_kwargs(self, octrees, lengths):
    """**to_dict() must expand cleanly into a function that also accepts
    extra keyword arguments."""
    structure = Spc(octrees, lengths)
    _test_func(**structure.to_dict(), another_arg=1)
def test_module_conv_transpose3d(self, height, width, depth, in_channels,
                                 out_channels, with_bias, octrees, lengths,
                                 max_level, pyramids, exsum,
                                 point_hierarchies, kernel_size,
                                 kernel_vectors, jump, with_spc_to_dict):
    """Check the ConvTranspose3d module: registered parameters/buffers,
    repr, and parity with the functional ``spc.conv_transpose3d``."""
    stride = 2 ** jump
    if stride > kernel_size:
        pytest.skip('stride higher than kernel_size is not tested')

    # Transposed conv consumes features at a coarser level (max_level - jump).
    in_level = max_level - jump
    in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
    coalescent_features = torch.rand((in_num_nodes, in_channels),
                                     device='cuda', requires_grad=True)

    conv = spc.ConvTranspose3d(in_channels, out_channels, kernel_vectors,
                               jump, bias=with_bias).cuda()

    # Parameter registration: weight always, bias only when requested.
    params = dict(conv.named_parameters())
    weight = params['weight']
    check_tensor(weight,
                 shape=(kernel_vectors.shape[0], in_channels, out_channels),
                 dtype=torch.float, device='cuda')
    if with_bias:
        assert len(params) == 2
        bias = params['bias']
        check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                     device='cuda')
    else:
        assert len(params) == 1
        bias = None

    # kernel_vectors must be registered as the module's single buffer.
    buffers = dict(conv.named_buffers())
    assert len(buffers) == 1
    assert torch.equal(buffers['kernel_vectors'], kernel_vectors)

    assert repr(conv) == f'ConvTranspose3d(in={in_channels}, ' \
                         f'out={out_channels}, ' \
                         f'kernel_vector_size={kernel_vectors.shape[0]})'

    # Forward pass through either calling convention.
    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        output, output_level = conv(**input_spc.to_dict(), level=in_level,
                                    input=coalescent_features)
    else:
        output, output_level = conv(octrees, point_hierarchies, in_level,
                                    pyramids, exsum, coalescent_features)

    # The module must agree exactly with the functional API.
    expected_output, expected_output_level = spc.conv_transpose3d(
        octrees, point_hierarchies, in_level, pyramids, exsum,
        coalescent_features, weight, kernel_vectors, jump=jump, bias=bias)
    assert torch.equal(output, expected_output)
    assert output_level == expected_output_level
def test_conv_transpose3d(self, height, width, depth, in_channels,
                          out_channels, sparsity_masks, dense_weight, bias,
                          octrees, lengths, max_level, pyramids, exsum,
                          point_hierarchies, kernel_vectors, kernel_size,
                          kernel_offset, spc_weight, jump, with_spc_to_dict):
    """Compare spc.conv_transpose3d (forward and backward) against a dense
    torch.nn.functional.conv_transpose3d reference on the same data."""
    stride = 2 ** jump
    if stride > kernel_size:
        pytest.skip('stride higher than kernel_size is not tested')

    # Keep the fixture masks: they select the expected *output*-level cells.
    out_sparsity_masks = sparsity_masks

    # Input lives at the coarser level (max_level - jump).
    in_level = max_level - jump
    in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
    coalescent_features = torch.rand((in_num_nodes, in_channels),
                                     device='cuda', requires_grad=True)

    # Fresh leaf tensors so each weight accumulates its own gradients.
    dense_weight = dense_weight.detach()
    dense_weight.requires_grad = True
    spc_weight = spc_weight.detach()
    spc_weight.requires_grad = True

    # Densify the sparse input features for the reference computation.
    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        feature_grids = spc.to_dense(**input_spc.to_dict(),
                                     input=coalescent_features,
                                     level=in_level)
    else:
        feature_grids = spc.to_dense(point_hierarchies, pyramids,
                                     coalescent_features, in_level)
    feature_grids = feature_grids[:, :, :math.ceil(height / stride),
                                  :math.ceil(width / stride),
                                  :math.ceil(depth / stride)]
    feature_grids = feature_grids.detach()
    feature_grids.requires_grad = True

    # Input-level occupancy mask, used to sparsify the dense gradient later.
    if with_spc_to_dict:
        sparsity_masks = spc.to_dense(
            **input_spc.to_dict(),
            input=torch.ones_like(coalescent_features),
            level=in_level).bool()
    else:
        sparsity_masks = spc.to_dense(point_hierarchies, pyramids,
                                      torch.ones_like(coalescent_features),
                                      in_level).bool()
    sparsity_masks = sparsity_masks[:, 0, :math.ceil(height / stride),
                                    :math.ceil(width / stride),
                                    :math.ceil(depth / stride)]

    # test forward
    if with_spc_to_dict:
        output_features, output_level = spc.conv_transpose3d(
            **input_spc.to_dict(), level=in_level,
            input=coalescent_features, weight=spc_weight,
            kernel_vectors=kernel_vectors, jump=jump, bias=bias)
        output = spc.to_dense(**input_spc.to_dict(), input=output_features,
                              level=output_level)
    else:
        output_features, output_level = spc.conv_transpose3d(
            octrees, point_hierarchies, in_level, pyramids, exsum,
            coalescent_features, spc_weight, kernel_vectors, jump=jump,
            bias=bias)
        output = spc.to_dense(point_hierarchies, pyramids, output_features,
                              output_level)
    output = output[:, :, :height, :width, :depth]

    # Dense reference: permute weight to (in, out, k, k, k) layout and crop
    # the kernel_offset border so both results are aligned.
    expected_output = torch.nn.functional.conv_transpose3d(
        feature_grids, dense_weight.permute(1, 0, 2, 3, 4), stride=stride,
        bias=bias, output_padding=stride - 1)[
            :, :, kernel_offset:height + kernel_offset,
            kernel_offset:width + kernel_offset,
            kernel_offset:depth + kernel_offset]
    expected_output *= out_sparsity_masks.unsqueeze(1)
    assert output_level == max_level
    assert torch.allclose(output, expected_output, rtol=1e-3, atol=1e-3)

    # test backward
    grad_out = torch.rand_like(expected_output)
    expected_output.backward(grad_out)
    output.backward(grad_out)
    # Sparsify the dense input gradient to compare with the SPC gradient.
    _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad,
                                                     sparsity_masks)
    assert torch.allclose(coalescent_features.grad, sparsified_grad,
                          rtol=5e-2, atol=5e-2)
    assert torch.allclose(
        spc_weight.grad,
        dense_weight.grad.reshape(out_channels, in_channels,
                                  -1).permute(2, 1, 0),
        rtol=5e-2, atol=5e-2)
def test_conv3d(self, height, width, depth, in_channels, out_channels,
                kernel_size, feature_grids, sparsity_masks, dense_weight,
                bias, octrees, lengths, coalescent_features, max_level,
                pyramids, exsum, point_hierarchies, kernel_vectors,
                kernel_offset, spc_weight, jump, with_spc_to_dict):
    """Compare spc.conv3d (forward and backward) against a dense
    torch.nn.functional.conv3d reference on the same data."""
    stride = 2 ** jump

    # Fresh leaf tensors so SPC-side gradients accumulate independently.
    coalescent_features = coalescent_features.detach()
    coalescent_features.requires_grad = True
    spc_weight = spc_weight.detach()
    spc_weight.requires_grad = True

    # SPC forward pass through either calling convention, then densify the
    # result and build the output-level occupancy mask.
    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        output_features, output_level = spc.conv3d(
            **input_spc.to_dict(), level=input_spc.max_level,
            input=coalescent_features, weight=spc_weight,
            kernel_vectors=kernel_vectors, jump=jump, bias=bias)
        output = spc.to_dense(**input_spc.to_dict(), input=output_features,
                              level=output_level)
        output_sparsity_masks = spc.to_dense(
            **input_spc.to_dict(),
            input=torch.ones_like(output_features, requires_grad=False),
            level=output_level)
    else:
        output_features, output_level = spc.conv3d(
            octrees, point_hierarchies, max_level, pyramids, exsum,
            coalescent_features, spc_weight, kernel_vectors, jump=jump,
            bias=bias)
        output = spc.to_dense(point_hierarchies, pyramids, output_features,
                              output_level)
        output_sparsity_masks = spc.to_dense(
            point_hierarchies, pyramids,
            torch.ones_like(output_features, requires_grad=False),
            output_level)

    # Dense reference: fresh leaves for the dense inputs.
    feature_grids = feature_grids.detach()
    feature_grids.requires_grad = True
    dense_weight = dense_weight.detach()
    dense_weight.requires_grad = True

    # Pad so the dense conv's receptive field lines up with the SPC kernel
    # (kernel_offset before, kernel_size - 1 - kernel_offset after, per axis).
    padded_input = torch.nn.functional.pad(
        feature_grids,
        (kernel_offset, kernel_size - 1 - kernel_offset,
         kernel_offset, kernel_size - 1 - kernel_offset,
         kernel_offset, kernel_size - 1 - kernel_offset))
    expected_output = torch.nn.functional.conv3d(padded_input, dense_weight,
                                                 stride=stride, bias=bias)
    expected_height, expected_width, expected_depth = \
        expected_output.shape[2:]
    # Zero out cells that are empty in the sparse output.
    expected_output *= output_sparsity_masks[:, :, :expected_height,
                                             :expected_width,
                                             :expected_depth]
    assert torch.allclose(
        output[:, :, :expected_height, :expected_width, :expected_depth],
        expected_output, atol=1e-3, rtol=1e-3)

    # Backward: push the same cotangent through both graphs and compare.
    grad_output = torch.rand_like(output)
    output.backward(grad_output)
    expected_output.backward(grad_output[:, :, :expected_height,
                                         :expected_width, :expected_depth])
    _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad,
                                                     sparsity_masks)
    assert torch.allclose(coalescent_features.grad, sparsified_grad,
                          rtol=1e-3, atol=1e-3)
    assert torch.allclose(
        spc_weight.grad,
        dense_weight.grad.reshape(out_channels, in_channels,
                                  -1).permute(2, 1, 0),
        rtol=5e-2, atol=5e-2)