Example #1
    def test_origin_map(self):
        manager = ME.CoordinateManager(
            D=1, coordinate_map_type=ME.CoordinateMapType.CPU)
        coords = torch.IntTensor([[0, -3], [0, -2], [0, -1], [0, 0], [1, 1],
                                  [1, 2], [1, 3]])

        # Coordinates carry batch indices 0 and 1 in the first column,
        # so the resulting key covers a batch size of 2.
        key, (unique_map, inverse_map) = manager.insert_and_map(coords, [1])
        batch_indices, origin_map = manager.origin_map(key)
        print(origin_map)
        # self.assertTrue(set(origin_map[0].numpy()) == set([0, 1, 2, 3]))
        key = manager.origin()

        batch_coordinates = manager.get_coordinates(key)
        print(batch_coordinates)
        self.assertTrue(len(batch_coordinates) == 2)

        if not ME.is_cuda_available():
            return

        manager = ME.CoordinateManager(
            D=1,
            coordinate_map_type=ME.CoordinateMapType.CUDA,
            allocator_type=ME.GPUMemoryAllocatorType.PYTORCH,
        )
        key, (unique_map,
              inverse_map) = manager.insert_and_map(coords.to(0), [1])
        origin_map = manager.origin_map(key)
        print(origin_map)
        key = manager.origin()

        self.assertTrue(manager.number_of_unique_batch_indices() == 2)
        batch_coordinates = manager.get_coordinates(key)
        print(batch_coordinates)
        self.assertTrue(len(batch_coordinates) == 2)
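
The snippet above is a method of a unittest.TestCase subclass and assumes torch and MinkowskiEngine are already imported. A minimal scaffold for running these examples, where the class name CoordinateManagerTestCase is an arbitrary choice rather than the original file's name:

import unittest

import torch
import MinkowskiEngine as ME


class CoordinateManagerTestCase(unittest.TestCase):
    # Paste the test methods from these examples here (e.g.
    # test_origin_map). Each exercises the CPU backend first and only
    # touches the CUDA backend when ME.is_cuda_available() is True.
    pass


if __name__ == "__main__":
    unittest.main()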
Example #2
    def test_gpu_allocator(self):
        if not ME.is_cuda_available():
            return

        # Set the global GPU memory allocator backend; the default is PYTORCH.
        # The second call below overrides the first, so CUDA ends up active.
        ME.set_gpu_allocator(ME.GPUMemoryAllocatorType.PYTORCH)
        ME.set_gpu_allocator(ME.GPUMemoryAllocatorType.CUDA)

        # Create a coordinate manager with the specified GPU allocator backend.
        # The allocator type has no effect in a CPU_ONLY build.
        manager = ME.CoordinateManager(
            D=1,
            coordinate_map_type=ME.CoordinateMapType.CPU,
            allocator_type=ME.GPUMemoryAllocatorType.CUDA,
        )
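
A short usage sketch building on the snippet above: pairing a CUDA coordinate map with the PYTORCH allocator, assuming a CUDA-enabled MinkowskiEngine build. Only calls that already appear in these examples are used; the intent (routing GPU allocations through PyTorch) is inferred from the allocator's name.

# Sketch, not part of the original test: combine a CUDA coordinate map
# with the PYTORCH allocator (assumes a CUDA-enabled build).
if ME.is_cuda_available():
    ME.set_gpu_allocator(ME.GPUMemoryAllocatorType.PYTORCH)
    cuda_manager = ME.CoordinateManager(
        D=1,
        coordinate_map_type=ME.CoordinateMapType.CUDA,
        allocator_type=ME.GPUMemoryAllocatorType.PYTORCH,
    )
    key, (unique_map, inverse_map) = cuda_manager.insert_and_map(
        torch.IntTensor([[0, 0], [1, 1]]).to(0), [1])
    print(cuda_manager.get_coordinates(key))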
Example #3

    def test(self):
        self.assertTrue(ME.is_cuda_available() == torch.cuda.is_available())
        if ME.is_cuda_available():
            print(ME.cuda_version())
            print(ME.get_gpu_memory_info())