def test_gpu(self):
    """Run global average pooling on CPU and GPU inputs and gradcheck backward.

    Skipped entirely when CUDA is not available.  The redundant second
    availability check from the original was removed: the early return
    above already guarantees CUDA is present at that point.
    """
    if not torch.cuda.is_available():
        return

    in_channels = 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()  # gradcheck requires double precision
    feats.requires_grad_()

    # Sanity run on a CPU sparse tensor first.
    input = SparseTensor(feats, coordinates=coords)
    pool = MinkowskiGlobalAvgPooling()
    output = pool(input)
    print(output)

    # Rebuild the input on GPU (device 0) and pool again.
    input = SparseTensor(feats, coordinates=coords, device=0)
    output = pool(input)
    print(output)

    # Check backward: gradcheck numerically verifies the analytic gradient
    # of the pooling function w.r.t. the input features.
    fn = MinkowskiGlobalPoolingFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                pool.pooling_mode,
                input.coordinate_map_key,
                output.coordinate_map_key,
                input._manager,
            ),
        )
    )
def test(self):
    """Run global average pooling on a CPU sparse tensor and gradcheck backward.

    The original bound an unused local ``D``; it has been removed.
    """
    in_channels = 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()  # gradcheck requires double precision
    feats.requires_grad_()

    input = SparseTensor(feats, coords)
    pool = MinkowskiGlobalAvgPooling()
    output = pool(input)
    print(output)

    # Check backward: gradcheck numerically verifies the analytic gradient
    # of the pooling function w.r.t. the input features.
    fn = MinkowskiGlobalPoolingFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                pool.pooling_mode,
                input.coordinate_map_key,
                output.coordinate_map_key,
                input._manager,
            ),
        )
    )