def do_execute(self, images: Images):
    """Run the median filter over *images* and check the data was modified."""
    kernel_size = 3
    pad_mode = 'reflect'
    before = np.copy(images.data[0])
    filtered = MedianFilter.filter_func(images, kernel_size, pad_mode)
    # The filter should have changed the first slice in place.
    th.assert_not_equals(filtered.data, before)
def run_serial(data, size, mode):
    """
    Run the median filter in serial.

    Multiprocessing is switched off for the duration of the call and always
    restored afterwards, even if the filter raises — otherwise a failure here
    would leave the global "mp off" state leaked into subsequent tests.

    :param data: the image data to filter
    :param size: the median kernel size
    :param mode: the padding mode
    :return: the filtered result computed on the CPU
    """
    th.switch_mp_off()
    try:
        cpu_result = MedianFilter.filter_func(data, size, mode)
    finally:
        # Guarantee multiprocessing is re-enabled no matter what happened.
        th.switch_mp_on()
    return cpu_result
def test_executed_no_helper_gpu(self):
    """The GPU code path should modify the image data."""
    images = th.generate_images()
    before = np.copy(images.data[0])
    result = MedianFilter.filter_func(images, 3, 'reflect', force_cpu=False)
    th.assert_not_equals(result.data, before)
def test_not_executed(self):
    """
    With size=None and mode=None the filter should not execute, leaving the
    image data unchanged.
    """
    images = th.generate_images()
    size = None
    mode = None
    original = np.copy(images.data[0])
    result = MedianFilter.filter_func(images, size, mode)
    # The filter must be a no-op without a kernel size, so the data must be
    # IDENTICAL to the original — the previous assert_not_equals contradicted
    # the test's name and intent.
    npt.assert_equal(result.data[0], original)
def test_double_is_used_in_cuda_for_float_64_arrays(self):
    """
    Run the median filter on the CPU and GPU with a float64 array. This
    demonstrates that replacing instances of 'float' with 'double' in the
    CUDA file is doing the right thing.
    """
    kernel_size = 3
    pad_mode = "reflect"
    data = th.generate_shared_array(dtype="float64")
    result_gpu = MedianFilter.filter_func(data.copy(), kernel_size, pad_mode, force_cpu=False)
    result_cpu = MedianFilter.filter_func(data.copy(), kernel_size, pad_mode, force_cpu=True)
    npt.assert_almost_equal(result_gpu, result_cpu)
def test_gpu_result_matches_cpu_result_for_different_filter_sizes(self):
    """
    Check the CPU and GPU median filter results agree for several kernel
    sizes.
    """
    pad_mode = "reflect"
    for filter_size in (5, 7, 9):
        with self.subTest(size=filter_size):
            stack = th.generate_images()
            on_gpu = MedianFilter.filter_func(stack.copy(), filter_size, pad_mode, force_cpu=False)
            on_cpu = MedianFilter.filter_func(stack.copy(), filter_size, pad_mode, force_cpu=True)
            npt.assert_almost_equal(on_gpu.data, on_cpu.data)
def test_executed_no_helper_seq(self):
    """
    Run the filter sequentially (multiprocessing off) and check it modified
    the data.
    """
    images = th.generate_images()
    size = 3
    mode = 'reflect'
    original = np.copy(images.data[0])
    th.switch_mp_off()
    try:
        result = MedianFilter.filter_func(images, size, mode)
    finally:
        # Restore multiprocessing even if the filter raises, so a failure
        # here cannot leak the disabled-mp state into subsequent tests.
        th.switch_mp_on()
    th.assert_not_equals(result.data, original)
def test_gpu_result_matches_cpu_result_for_larger_images(self):
    """
    Run the median filter on the CPU and GPU with a larger image size. Check
    that the results match. This test may reveal issues such as the grid and
    dimension size arguments going wrong.
    """
    side = 1200
    kernel_size = 3
    pad_mode = "reflect"
    stack = th.generate_shared_array(shape=(20, side, side))
    gpu_out = MedianFilter.filter_func(stack.copy(), kernel_size, pad_mode, force_cpu=False)
    cpu_out = MedianFilter.filter_func(stack.copy(), kernel_size, pad_mode, force_cpu=True)
    npt.assert_almost_equal(gpu_out, cpu_out)
def test_numpy_pad_modes_match_scipy_median_modes(self):
    """
    Run the median filter on the GPU and CPU with the different scipy modes.
    Check that the results match. Should demonstrate that the arguments
    passed to numpy pad are the correct equivalents to the scipy modes.
    """
    kernel_size = 3
    for pad_mode in modes():
        with self.subTest(mode=pad_mode):
            arr = th.generate_shared_array()
            from_gpu = MedianFilter.filter_func(arr.copy(), kernel_size, pad_mode, force_cpu=False)
            from_cpu = MedianFilter.filter_func(arr.copy(), kernel_size, pad_mode, force_cpu=True)
            npt.assert_almost_equal(from_gpu[0], from_cpu[0])
def test_memory_change_acceptable(self):
    """
    Expected behaviour for the filter is to be done in place without using
    more memory. In reality the memory is increased by about 40MB
    (4 April 2017), but this could change in the future. The reason why a 10%
    window is given on the expected size is to account for any library
    imports that may happen. This will still capture if the data is doubled,
    which is the main goal.
    """
    images = th.generate_images()
    memory_before = get_memory_usage_linux(kb=True)[0]
    MedianFilter.filter_func(images, 3, 'reflect')
    memory_after = get_memory_usage_linux(kb=True)[0]
    self.assertLess(memory_after, memory_before * 1.1)
def test_image_slicing_works(self):
    """
    Run the median filter on the CPU and GPU with an image stack that is
    larger than the limit permitted on the GPU. This demonstrates that the
    algorithm for slicing the stack and overwriting GPU arrays is working
    correctly.
    """
    side = 30
    kernel_size = 3
    pad_mode = "reflect"
    # Make the number of images in the stack exceed the maximum number of GPU-stored images
    stack_depth = gpu.MAX_GPU_SLICES * 3
    stack = th.generate_shared_array(shape=(stack_depth, side, side))
    via_gpu = MedianFilter.filter_func(stack.copy(), kernel_size, pad_mode, force_cpu=False)
    via_cpu = MedianFilter.filter_func(stack.copy(), kernel_size, pad_mode, force_cpu=True)
    npt.assert_almost_equal(via_gpu, via_cpu)
def test_executed_with_nan(self, _, use_cpu):
    """NaN positions must survive the filter unchanged on both CPU and GPU."""
    if not use_cpu and not gpu.gpu_available():
        self.skipTest(reason="Skip GPU tests if cupy isn't installed")
    images = th.generate_images(shape=(1, 20, 20), seed=2021)
    # Scatter NaNs in a variety of configurations.
    images.data[0, 0, 1] = np.nan    # single edge
    images.data[0, 4, 4] = np.nan    # single
    images.data[0, 4, 7] = np.nan    # diagonal neighbours
    images.data[0, 5, 8] = np.nan
    images.data[0, 7:9, 2:4] = np.nan    # 2x2 block
    images.data[0, 7:9, 6:9] = np.nan    # 2x3
    images.data[0, 12:15, 2:5] = np.nan  # 3x3
    self.assertTrue(np.any(np.isnan(images.data)))
    result = MedianFilter.filter_func(images.copy(), 3, 'reflect', force_cpu=use_cpu)
    # The NaN mask of the output must match the NaN mask of the input.
    npt.assert_equal(np.isnan(result.data), np.isnan(images.data))
def test_array_input_unchanged_when_gpu_runs_out_of_memory(self):
    """
    Mock the GPU running out of memory. Check that this leaves the input
    array to be unchanged.
    """
    import cupy as cp
    stack = th.generate_shared_array(shape=(2000, 200, 200))
    oom_error = cp.cuda.memory.OutOfMemoryError(0, 0)
    with mock.patch("mantidimaging.core.gpu.utility._send_single_array_to_gpu",
                    side_effect=oom_error):
        result = MedianFilter.filter_func(stack, 3, "reflect", force_cpu=False)
    npt.assert_equal(result, stack)