def test_module_conv3d(self, height, width, depth, in_channels, out_channels,
                       with_bias, octrees, lengths, coalescent_features,
                       max_level, pyramids, exsum, point_hierarchies,
                       kernel_vectors, jump, with_spc_to_dict):
    conv = spc.Conv3d(in_channels, out_channels, kernel_vectors, jump,
                      bias=with_bias).cuda()
    params = dict(conv.named_parameters())
    weight = params['weight']
    check_tensor(weight,
                 shape=(kernel_vectors.shape[0], in_channels, out_channels),
                 dtype=torch.float, device='cuda')

    if with_bias:
        assert len(params) == 2
        bias = params['bias']
        check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                     device='cuda')
    else:
        assert len(params) == 1
        bias = None

    buffers = dict(conv.named_buffers())
    assert len(buffers) == 1
    assert torch.equal(buffers['kernel_vectors'], kernel_vectors)

    assert repr(conv) == f'Conv3d(in={in_channels}, out={out_channels}, ' \
                         f'kernel_vector_size={kernel_vectors.shape[0]})'

    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        output, output_level = conv(**input_spc.to_dict(), level=max_level,
                                    input=coalescent_features)
    else:
        output, output_level = conv(octrees, point_hierarchies, max_level,
                                    pyramids, exsum, coalescent_features)

    expected_output, expected_output_level = spc.conv3d(
        octrees, point_hierarchies, max_level, pyramids, exsum,
        coalescent_features, weight, kernel_vectors, jump=jump, bias=bias)

    assert torch.equal(output, expected_output)
    assert output_level == expected_output_level

def test_to_dict_default(self, octrees, lengths, expected_max_level,
                         expected_pyramids, expected_exsum,
                         expected_point_hierarchies):
    spc = Spc(octrees, lengths)
    d = spc.to_dict()
    assert d.keys() == {'octrees', 'lengths', 'max_level', 'pyramids',
                        'exsum', 'point_hierarchies'}
    assert torch.equal(d['octrees'], octrees)
    assert torch.equal(d['lengths'], lengths)
    assert d['max_level'] == expected_max_level
    assert torch.equal(d['pyramids'], expected_pyramids)
    assert torch.equal(d['exsum'], expected_exsum)
    assert torch.equal(d['point_hierarchies'], expected_point_hierarchies)

def test_to_cuda(self, using_to, octrees, lengths, expected_max_level,
                 expected_pyramids, expected_exsum,
                 expected_point_hierarchies):
    spc = Spc(octrees.cpu(), lengths, expected_max_level, expected_pyramids,
              expected_exsum.cpu(), expected_point_hierarchies.cpu())
    if using_to:
        spc = spc.to('cuda')
    else:
        spc = spc.cuda()
    assert torch.equal(spc.octrees, octrees)
    assert torch.equal(spc.lengths, lengths)
    assert spc.max_level == expected_max_level
    assert torch.equal(spc.pyramids, expected_pyramids)
    assert torch.equal(spc.exsum, expected_exsum)
    assert torch.equal(spc.point_hierarchies, expected_point_hierarchies)

def test_from_list(self, octrees, lengths):
    octrees_list = []
    start_idx = 0
    for length in lengths:
        octrees_list.append(octrees[start_idx:start_idx + length])
        start_idx += length
    spc = Spc.from_list(octrees_list)
    assert torch.equal(spc.octrees, octrees)
    assert torch.equal(spc.lengths, lengths)

def test_non_init_private_attr(self, octrees, lengths):
    """Check that lazily computed private attributes are left uninitialized
    when they are not passed to the constructor."""
    spc = Spc(octrees, lengths)
    assert torch.equal(spc.octrees, octrees)
    assert torch.equal(spc.lengths, lengths)
    assert spc._max_level is None
    assert spc._pyramids is None
    assert spc._exsum is None
    assert spc._point_hierarchies is None

def test_to_dict_with_keys(self, keys, octrees, lengths, expected_max_level,
                           expected_pyramids, expected_exsum,
                           expected_point_hierarchies):
    keys = set(keys)
    spc = Spc(octrees, lengths)
    d = spc.to_dict(keys)
    assert d.keys() == keys
    if 'octrees' in keys:
        assert torch.equal(d['octrees'], octrees)
    if 'lengths' in keys:
        assert torch.equal(d['lengths'], lengths)
    if 'max_level' in keys:
        assert d['max_level'] == expected_max_level
    if 'pyramids' in keys:
        assert torch.equal(d['pyramids'], expected_pyramids)
    if 'exsum' in keys:
        assert torch.equal(d['exsum'], expected_exsum)
    if 'point_hierarchies' in keys:
        assert torch.equal(d['point_hierarchies'], expected_point_hierarchies)

def test_init_private_attr(self, octrees, lengths, expected_max_level,
                           expected_pyramids, expected_exsum,
                           expected_point_hierarchies):
    """Check that private attributes are initialized when their values are
    passed to the constructor."""
    spc = Spc(octrees, lengths, expected_max_level, expected_pyramids,
              expected_exsum, expected_point_hierarchies)
    assert torch.equal(spc.octrees, octrees)
    assert torch.equal(spc.lengths, lengths)
    assert spc._max_level == expected_max_level
    assert torch.equal(spc._pyramids, expected_pyramids)
    assert torch.equal(spc._exsum, expected_exsum)
    assert torch.equal(spc._point_hierarchies, expected_point_hierarchies)

def test_cpu_init(self, octrees, lengths, expected_max_level,
                  expected_pyramids, expected_exsum,
                  expected_point_hierarchies):
    octrees = octrees.cpu()
    expected_exsum = expected_exsum.cpu()
    expected_point_hierarchies = expected_point_hierarchies.cpu()
    spc = Spc(octrees, lengths, expected_max_level, expected_pyramids,
              expected_exsum, expected_point_hierarchies)
    assert torch.equal(spc.octrees, octrees)
    assert torch.equal(spc.lengths, lengths)
    assert spc.max_level == expected_max_level
    assert torch.equal(spc.pyramids, expected_pyramids)
    assert torch.equal(spc.exsum, expected_exsum)
    assert torch.equal(spc.point_hierarchies, expected_point_hierarchies)

def test_scan_octrees_properties(self, octrees, lengths, expected_max_level,
                                 expected_pyramids, expected_exsum):
    """Check that properties generated by scan_octrees are accessible"""
    spc = Spc(octrees, lengths)
    assert torch.equal(spc.octrees, octrees)
    assert torch.equal(spc.lengths, lengths)
    assert spc.max_level == expected_max_level
    assert torch.equal(spc.pyramids, expected_pyramids)
    assert torch.equal(spc.exsum, expected_exsum)
    # This is checking that:
    # 1) the data pointer of each property is the same as the one of the
    #    corresponding private attribute
    # 2) the private attributes are not recomputed on repeated access
    assert spc._pyramids.data_ptr() == spc.pyramids.data_ptr()
    assert spc._exsum.data_ptr() == spc.exsum.data_ptr()
    # _point_hierarchies is still not initialized
    assert spc._point_hierarchies is None

def test_generate_points_properties(self, octrees, lengths,
                                    expected_point_hierarchies):
    """Check that properties generated by generate_points are accessible"""
    spc = Spc(octrees, lengths)
    assert spc._point_hierarchies is None
    assert torch.equal(spc.point_hierarchies, expected_point_hierarchies)
    # Accessing point_hierarchies also generated _max_level, _pyramids and
    # _exsum, as they are dependencies.
    assert spc._max_level is not None
    assert spc._pyramids is not None
    assert spc._exsum is not None
    # Check that accessing the point_hierarchies property again
    # does not recompute the private attributes.
    old_pyramids_data_ptr = spc._pyramids.data_ptr()
    old_exsum_data_ptr = spc._exsum.data_ptr()
    assert spc._point_hierarchies.data_ptr() == spc.point_hierarchies.data_ptr()
    assert old_pyramids_data_ptr == spc._pyramids.data_ptr()
    assert old_exsum_data_ptr == spc._exsum.data_ptr()

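# NOTE (summary of the two tests above, based only on what they assert):
# Spc properties such as pyramids, exsum and point_hierarchies appear to be
# computed lazily on first access (via scan_octrees / generate_points) and
# cached in the corresponding underscore attributes; later accesses return
# the cached tensors, which is why comparing data_ptr() is a valid check
# that nothing gets recomputed.
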
def test_simple(self, with_spc_to_dict):
    # Two octrees packed into a single batch: each byte encodes the occupancy
    # of the 8 children of a node, and `lengths` gives the number of bytes
    # belonging to each octree.
    bits_t = torch.tensor([[0, 0, 0, 1, 0, 0, 0, 1],
                           [0, 0, 0, 0, 0, 1, 1, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 1, 1, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0],
                           [1, 1, 1, 1, 1, 1, 1, 1],
                           [0, 1, 0, 1, 0, 1, 0, 1]],
                          device='cuda', dtype=torch.float)
    octrees = bits_to_uint8(torch.flip(bits_t, dims=(-1,)))
    lengths = torch.tensor([6, 5], dtype=torch.int)
    max_level, pyramids, exsum = scan_octrees(octrees, lengths)
    point_hierarchies = generate_points(octrees, pyramids, exsum)
    coalescent_features = torch.tensor(
        [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.],
        device='cuda', dtype=torch.float).reshape(-1, 1)
    # Batch index and the three spatial coordinates of each occupied leaf,
    # used to scatter the features into the expected dense grid.
    feat_idx = torch.tensor(
        [[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
         [1, 1, 6, 7, 6, 6, 6, 6, 7, 7, 7, 7, 6, 6, 7, 7],
         [1, 3, 1, 4, 4, 4, 5, 5, 4, 4, 5, 5, 6, 7, 6, 7],
         [3, 1, 3, 5, 6, 7, 6, 7, 6, 7, 6, 7, 4, 4, 4, 4]],
        dtype=torch.long)
    expected_feature_grids = torch.zeros((2, 1, 8, 8, 8), dtype=torch.float,
                                         device='cuda')
    expected_feature_grids[feat_idx[0], :, feat_idx[1], feat_idx[2],
                           feat_idx[3]] = coalescent_features

    if with_spc_to_dict:
        feature_grids = to_dense(**Spc(octrees, lengths).to_dict(),
                                 input=coalescent_features)
    else:
        feature_grids = to_dense(point_hierarchies, pyramids,
                                 coalescent_features, max_level)
    assert torch.equal(feature_grids, expected_feature_grids)

def test_typo_to_dict_kwargs(self, octrees, lengths):
    spc = Spc(octrees, lengths)
    with pytest.raises(TypeError,
                       match="got an unexpected keyword argument 'anotherarg'"):
        # typo on purpose
        _test_func(**spc.to_dict(), anotherarg=1)

def test_to_dict_kwargs(self, octrees, lengths):
    spc = Spc(octrees, lengths)
    _test_func(**spc.to_dict(), another_arg=1)

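# The two to_dict() kwargs tests above rely on a module-level helper
# `_test_func` defined elsewhere in this file. Its exact body is not shown
# here; for the tests to behave as asserted it only needs a signature along
# the lines of the (assumed) sketch below, i.e. explicit parameters for every
# key returned by Spc.to_dict() plus `another_arg`, and no **kwargs catch-all
# (otherwise the typo test could not raise TypeError):
#
#     def _test_func(octrees, lengths, max_level, pyramids, exsum,
#                    point_hierarchies, another_arg):
#         pass
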
def test_module_conv_transpose3d(self, height, width, depth, in_channels,
                                 out_channels, with_bias, octrees, lengths,
                                 max_level, pyramids, exsum,
                                 point_hierarchies, kernel_size,
                                 kernel_vectors, jump, with_spc_to_dict):
    stride = 2**jump
    if stride > kernel_size:
        pytest.skip('stride higher than kernel_size is not tested')
    in_level = max_level - jump
    in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
    coalescent_features = torch.rand((in_num_nodes, in_channels),
                                     device='cuda', requires_grad=True)
    conv = spc.ConvTranspose3d(in_channels, out_channels, kernel_vectors,
                               jump, bias=with_bias).cuda()
    params = dict(conv.named_parameters())
    weight = params['weight']
    check_tensor(weight,
                 shape=(kernel_vectors.shape[0], in_channels, out_channels),
                 dtype=torch.float, device='cuda')

    if with_bias:
        assert len(params) == 2
        bias = params['bias']
        check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                     device='cuda')
    else:
        assert len(params) == 1
        bias = None

    buffers = dict(conv.named_buffers())
    assert len(buffers) == 1
    assert torch.equal(buffers['kernel_vectors'], kernel_vectors)

    assert repr(conv) == f'ConvTranspose3d(in={in_channels}, ' \
                         f'out={out_channels}, ' \
                         f'kernel_vector_size={kernel_vectors.shape[0]})'

    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        output, output_level = conv(**input_spc.to_dict(), level=in_level,
                                    input=coalescent_features)
    else:
        output, output_level = conv(octrees, point_hierarchies, in_level,
                                    pyramids, exsum, coalescent_features)

    expected_output, expected_output_level = spc.conv_transpose3d(
        octrees, point_hierarchies, in_level, pyramids, exsum,
        coalescent_features, weight, kernel_vectors, jump=jump, bias=bias)

    assert torch.equal(output, expected_output)
    assert output_level == expected_output_level

def test_conv_transpose3d(self, height, width, depth, in_channels,
                          out_channels, sparsity_masks, dense_weight, bias,
                          octrees, lengths, max_level, pyramids, exsum,
                          point_hierarchies, kernel_vectors, kernel_size,
                          kernel_offset, spc_weight, jump, with_spc_to_dict):
    stride = 2**jump
    if stride > kernel_size:
        pytest.skip('stride higher than kernel_size is not tested')
    out_sparsity_masks = sparsity_masks
    in_level = max_level - jump
    in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
    coalescent_features = torch.rand((in_num_nodes, in_channels),
                                     device='cuda', requires_grad=True)
    dense_weight = dense_weight.detach()
    dense_weight.requires_grad = True
    spc_weight = spc_weight.detach()
    spc_weight.requires_grad = True

    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        feature_grids = spc.to_dense(**input_spc.to_dict(),
                                     input=coalescent_features,
                                     level=in_level)
    else:
        feature_grids = spc.to_dense(point_hierarchies, pyramids,
                                     coalescent_features, in_level)
    feature_grids = feature_grids[:, :, :math.ceil(height / stride),
                                  :math.ceil(width / stride),
                                  :math.ceil(depth / stride)]
    feature_grids = feature_grids.detach()
    feature_grids.requires_grad = True

    if with_spc_to_dict:
        sparsity_masks = spc.to_dense(
            **input_spc.to_dict(),
            input=torch.ones_like(coalescent_features),
            level=in_level).bool()
    else:
        sparsity_masks = spc.to_dense(point_hierarchies, pyramids,
                                      torch.ones_like(coalescent_features),
                                      in_level).bool()
    sparsity_masks = sparsity_masks[:, 0, :math.ceil(height / stride),
                                    :math.ceil(width / stride),
                                    :math.ceil(depth / stride)]

    # test forward
    if with_spc_to_dict:
        output_features, output_level = spc.conv_transpose3d(
            **input_spc.to_dict(), level=in_level, input=coalescent_features,
            weight=spc_weight, kernel_vectors=kernel_vectors, jump=jump,
            bias=bias)
        output = spc.to_dense(**input_spc.to_dict(), input=output_features,
                              level=output_level)
    else:
        output_features, output_level = spc.conv_transpose3d(
            octrees, point_hierarchies, in_level, pyramids, exsum,
            coalescent_features, spc_weight, kernel_vectors, jump=jump,
            bias=bias)
        output = spc.to_dense(point_hierarchies, pyramids, output_features,
                              output_level)
    output = output[:, :, :height, :width, :depth]
    expected_output = torch.nn.functional.conv_transpose3d(
        feature_grids, dense_weight.permute(1, 0, 2, 3, 4), stride=stride,
        bias=bias, output_padding=stride - 1)[
            :, :, kernel_offset:height + kernel_offset,
            kernel_offset:width + kernel_offset,
            kernel_offset:depth + kernel_offset]
    expected_output *= out_sparsity_masks.unsqueeze(1)
    assert output_level == max_level
    assert torch.allclose(output, expected_output, rtol=1e-3, atol=1e-3)

    # test backward
    grad_out = torch.rand_like(expected_output)
    expected_output.backward(grad_out)
    output.backward(grad_out)
    _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad,
                                                     sparsity_masks)
    assert torch.allclose(coalescent_features.grad, sparsified_grad,
                          rtol=5e-2, atol=5e-2)
    assert torch.allclose(spc_weight.grad,
                          dense_weight.grad.reshape(out_channels, in_channels,
                                                    -1).permute(2, 1, 0),
                          rtol=5e-2, atol=5e-2)

def test_conv3d(self, height, width, depth, in_channels, out_channels,
                kernel_size, feature_grids, sparsity_masks, dense_weight,
                bias, octrees, lengths, coalescent_features, max_level,
                pyramids, exsum, point_hierarchies, kernel_vectors,
                kernel_offset, spc_weight, jump, with_spc_to_dict):
    stride = 2**jump
    coalescent_features = coalescent_features.detach()
    coalescent_features.requires_grad = True
    spc_weight = spc_weight.detach()
    spc_weight.requires_grad = True

    if with_spc_to_dict:
        input_spc = Spc(octrees, lengths)
        output_features, output_level = spc.conv3d(
            **input_spc.to_dict(), level=input_spc.max_level,
            input=coalescent_features, weight=spc_weight,
            kernel_vectors=kernel_vectors, jump=jump, bias=bias)
        output = spc.to_dense(**input_spc.to_dict(), input=output_features,
                              level=output_level)
        output_sparsity_masks = spc.to_dense(
            **input_spc.to_dict(),
            input=torch.ones_like(output_features, requires_grad=False),
            level=output_level)
    else:
        output_features, output_level = spc.conv3d(
            octrees, point_hierarchies, max_level, pyramids, exsum,
            coalescent_features, spc_weight, kernel_vectors, jump=jump,
            bias=bias)
        output = spc.to_dense(point_hierarchies, pyramids, output_features,
                              output_level)
        output_sparsity_masks = spc.to_dense(
            point_hierarchies, pyramids,
            torch.ones_like(output_features, requires_grad=False),
            output_level)

    feature_grids = feature_grids.detach()
    feature_grids.requires_grad = True
    dense_weight = dense_weight.detach()
    dense_weight.requires_grad = True
    padded_input = torch.nn.functional.pad(
        feature_grids,
        (kernel_offset, kernel_size - 1 - kernel_offset,
         kernel_offset, kernel_size - 1 - kernel_offset,
         kernel_offset, kernel_size - 1 - kernel_offset))
    expected_output = torch.nn.functional.conv3d(padded_input, dense_weight,
                                                 stride=stride, bias=bias)
    expected_height, expected_width, expected_depth = expected_output.shape[2:]
    expected_output *= output_sparsity_masks[:, :, :expected_height,
                                             :expected_width, :expected_depth]
    assert torch.allclose(
        output[:, :, :expected_height, :expected_width, :expected_depth],
        expected_output, atol=1e-3, rtol=1e-3)

    grad_output = torch.rand_like(output)
    output.backward(grad_output)
    expected_output.backward(grad_output[:, :, :expected_height,
                                         :expected_width, :expected_depth])
    _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad,
                                                     sparsity_masks)
    assert torch.allclose(coalescent_features.grad, sparsified_grad,
                          rtol=1e-3, atol=1e-3)
    assert torch.allclose(spc_weight.grad,
                          dense_weight.grad.reshape(out_channels, in_channels,
                                                    -1).permute(2, 1, 0),
                          rtol=5e-2, atol=5e-2)
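
# NOTE (relation used by the conv tests above, inferred from their asserts):
# the sparse weights have shape (kernel_vector_size, in_channels,
# out_channels), while torch.nn.functional.conv3d expects dense weights of
# shape (out_channels, in_channels, kD, kH, kW); the tests map between the
# two layouts with
# dense_weight.reshape(out_channels, in_channels, -1).permute(2, 1, 0).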