Example #1
    def do_execute(self, images: Images):
        # Helper: run the median filter and check that the first image was modified.
        size = 3
        mode = 'reflect'

        original = np.copy(images.data[0])
        result = MedianFilter.filter_func(images, size, mode)
        th.assert_not_equals(result.data, original)
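
These snippets read like excerpts from a single mantidimaging test module. A header roughly like the one below would make them self-contained; every module path here is an assumption inferred from the identifiers used in the examples (only mantidimaging.core.gpu.utility appears verbatim, in the mock.patch target of Example #14):

import unittest
from unittest import mock

import numpy as np
import numpy.testing as npt

import mantidimaging.test_helpers.unit_test_helper as th                      # assumed home of the `th` helpers
from mantidimaging.core.data import Images                                    # assumed home of the Images type
from mantidimaging.core.gpu import utility as gpu                             # module path seen in Example #14's mock.patch
from mantidimaging.core.operations.median_filter import MedianFilter, modes   # assumed path
from mantidimaging.core.utility.memory_usage import get_memory_usage_linux    # assumed path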
Example #2
def run_serial(data, size, mode):
    """
    Run the median filter in serial.
    """
    th.switch_mp_off()
    cpu_result = MedianFilter.filter_func(data, size, mode)
    th.switch_mp_on()
    return cpu_result
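
Note that run_serial leaves multiprocessing switched off if filter_func raises. A try/finally variant keeps the toggle balanced; this is a sketch reusing the same `th` and `MedianFilter` names as above, not part of the original file:

def run_serial_safe(data, size, mode):
    """
    Run the median filter in serial, restoring multiprocessing even on error.
    """
    th.switch_mp_off()
    try:
        return MedianFilter.filter_func(data, size, mode)
    finally:
        th.switch_mp_on()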
Example #3
    def test_executed_no_helper_gpu(self):
        images = th.generate_images()

        size = 3
        mode = 'reflect'

        original = np.copy(images.data[0])
        result = MedianFilter.filter_func(images, size, mode, force_cpu=False)

        th.assert_not_equals(result.data, original)
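
This test assumes a usable GPU. A guarded variant (a sketch with a hypothetical method name, reusing the gpu.gpu_available() check from Example #13) would skip cleanly on machines without cupy:

    def test_executed_no_helper_gpu_skips_without_gpu(self):
        # Hypothetical variant: skip instead of failing when no usable GPU is present.
        if not gpu.gpu_available():
            self.skipTest("Skip GPU tests if cupy isn't installed")

        images = th.generate_images()
        original = np.copy(images.data[0])

        result = MedianFilter.filter_func(images, 3, 'reflect', force_cpu=False)

        th.assert_not_equals(result.data, original)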
Example #4
    def test_not_executed(self):
        # With size=None and mode=None the filter should be a no-op,
        # so the data must be left unchanged.
        images = th.generate_images()

        size = None
        mode = None

        original = np.copy(images.data[0])
        result = MedianFilter.filter_func(images, size, mode)

        npt.assert_equal(result.data, original)
Example #5
    def test_double_is_used_in_cuda_for_float_64_arrays(self):
        """
        Run the median filter on the CPU and GPU with a float64 array. This demonstrates that replacing instances of
        'float' with 'double' in the CUDA file is doing the right thing.
        """
        size = 3
        mode = "reflect"
        images = th.generate_shared_array(dtype="float64")

        gpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=False)
        cpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=True)

        npt.assert_almost_equal(gpu_result, cpu_result)
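
The docstring refers to swapping 'float' for 'double' in the CUDA source when the input is float64. The library's actual mechanism is not shown in these excerpts; a minimal sketch of the idea, with a hypothetical helper name, looks like this:

import numpy as np

def _choose_cuda_scalar_type(kernel_source: str, dtype) -> str:
    # Hypothetical helper: compile the kernel with double precision when the
    # incoming numpy array is float64, otherwise keep single precision.
    if np.dtype(dtype) == np.float64:
        return kernel_source.replace("float", "double")
    return kernel_source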
Example #6
    def test_gpu_result_matches_cpu_result_for_different_filter_sizes(self):
        """
        Run the median filter on the CPU and GPU with different filter sizes. Check that the results match.
        """
        mode = "reflect"
        for size in [5, 7, 9]:
            with self.subTest(size=size):

                images = th.generate_images()

                gpu_result = MedianFilter.filter_func(images.copy(),
                                                      size,
                                                      mode,
                                                      force_cpu=False)
                cpu_result = MedianFilter.filter_func(images.copy(),
                                                      size,
                                                      mode,
                                                      force_cpu=True)

                npt.assert_almost_equal(gpu_result.data, cpu_result.data)
Example #7
    def test_executed_no_helper_seq(self):
        images = th.generate_images()

        size = 3
        mode = 'reflect'

        original = np.copy(images.data[0])
        th.switch_mp_off()
        result = MedianFilter.filter_func(images, size, mode)
        th.switch_mp_on()

        th.assert_not_equals(result.data, original)
Example #8
    def test_gpu_result_matches_cpu_result_for_larger_images(self):
        """
        Run the median filter on the CPU and GPU with a larger image size and check that the results match.
        This test may reveal issues such as incorrect grid and block dimension arguments.
        """
        N = 1200
        size = 3
        mode = "reflect"

        images = th.generate_shared_array(shape=(20, N, N))

        gpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=False)
        cpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=True)

        npt.assert_almost_equal(gpu_result, cpu_result)
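
As a rough illustration of the grid and block dimensions the docstring alludes to (general CUDA practice, not mantidimaging's code), covering an image with fixed-size thread blocks is usually a ceiling division:

def grid_dims_for_image(n_rows: int, n_cols: int, block: int = 16):
    # Ceil-divide the image extent by the block edge so every pixel is covered.
    grid_x = (n_cols + block - 1) // block
    grid_y = (n_rows + block - 1) // block
    return (grid_x, grid_y), (block, block)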
Example #9
    def test_numpy_pad_modes_match_scipy_median_modes(self):
        """
        Run the median filter on the GPU and CPU with the different scipy modes. Check that the results match.
        Should demonstrate that the arguments passed to numpy pad are the correct equivalents to the scipy modes.
        """
        size = 3
        for mode in modes():
            with self.subTest(mode=mode):

                images = th.generate_shared_array()

                gpu_result = MedianFilter.filter_func(images.copy(),
                                                      size,
                                                      mode,
                                                      force_cpu=False)
                cpu_result = MedianFilter.filter_func(images.copy(),
                                                      size,
                                                      mode,
                                                      force_cpu=True)

                npt.assert_almost_equal(gpu_result[0], cpu_result[0])
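
For reference, the scipy.ndimage filter modes and the numpy.pad modes use different names for the same boundary behaviours. The correspondence the test relies on is written out below as an illustration; the mapping is taken from the scipy/numpy documentation, not from mantidimaging:

# scipy.ndimage mode  ->  numpy.pad mode
SCIPY_TO_NUMPY_PAD_MODE = {
    "reflect": "symmetric",  # edge sample is repeated
    "constant": "constant",
    "nearest": "edge",
    "mirror": "reflect",     # edge sample is not repeated
    "wrap": "wrap",
}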
Example #10
    def test_memory_change_acceptable(self):
        """
        The filter is expected to operate in place without using more memory.

        In reality the memory is increased by about 40MB (4 April 2017),
        but this could change in the future.

        The reason why a 10% window is given on the expected size is
        to account for any library imports that may happen.

        This will still capture if the data is doubled, which is the main goal.
        """
        images = th.generate_images()
        size = 3
        mode = 'reflect'

        cached_memory = get_memory_usage_linux(kb=True)[0]

        MedianFilter.filter_func(images, size, mode)

        self.assertLess(get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
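
get_memory_usage_linux is a mantidimaging helper that is not shown in these excerpts. A rough standard-library stand-in for the resident-memory reading it appears to provide (an assumption about what it measures) is:

import resource

def current_max_rss_kb() -> int:
    # ru_maxrss is reported in kilobytes on Linux (bytes on macOS).
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss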
Example #11
    def test_image_slicing_works(self):
        """
        Run the median filter on the CPU and GPU with an image stack that is larger than the limit permitted on the GPU.
        This demonstrates that the algorithm for slicing the stack and overwriting GPU arrays is working correctly.
        """
        N = 30
        size = 3
        mode = "reflect"

        # Make the number of images in the stack exceed the maximum number of GPU-stored images
        n_images = gpu.MAX_GPU_SLICES * 3

        images = th.generate_shared_array(shape=(n_images, N, N))

        gpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=False)
        cpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=True)

        npt.assert_almost_equal(gpu_result, cpu_result)
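
The slicing scheme the docstring describes is not shown here. The underlying idea, sketched independently of the library's implementation, is to walk the stack in chunks of at most MAX_GPU_SLICES images:

def iterate_stack_slices(stack, max_slices):
    # Yield consecutive chunks of the image stack, each small enough to fit on the GPU.
    for start in range(0, stack.shape[0], max_slices):
        yield stack[start:start + max_slices]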
Example #12
    def test_execute_wrapper_return_is_runnable(self):
        """
        Test that the partial returned by execute_wrapper can be executed (kwargs are named correctly)
        """
        size_field = mock.Mock()
        size_field.value = mock.Mock(return_value=0)
        mode_field = mock.Mock()
        mode_field.currentText = mock.Mock(return_value=0)
        use_gpu_field = mock.Mock()
        use_gpu_field.isChecked = mock.Mock(return_value=False)
        execute_func = MedianFilter.execute_wrapper(size_field, mode_field, use_gpu_field)

        images = th.generate_images()
        execute_func(images)

        self.assertEqual(size_field.value.call_count, 1)
        self.assertEqual(mode_field.currentText.call_count, 1)
        self.assertEqual(use_gpu_field.isChecked.call_count, 1)
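
The test only exercises the partial returned by execute_wrapper; the wrapper itself is not shown. A plausible shape for it, consistent with the widget getters mocked above (a sketch, not the library's code), is:

from functools import partial

def execute_wrapper(size_field, mode_field, use_gpu_field):
    # Read each widget value once and bind the results as keyword arguments.
    return partial(MedianFilter.filter_func,
                   size=size_field.value(),
                   mode=mode_field.currentText(),
                   force_cpu=not use_gpu_field.isChecked())

Calling the returned partial with an image stack then runs the filter with the captured settings, which is why each mocked getter ends up with a call count of exactly one.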
Example #13
    def test_executed_with_nan(self, _, use_cpu):
        if not use_cpu and not gpu.gpu_available():
            self.skipTest(reason="Skip GPU tests if cupy isn't installed")
        shape = (1, 20, 20)
        images = th.generate_images(shape=shape, seed=2021)

        images.data[0, 0, 1] = np.nan  # single edge
        images.data[0, 4, 4] = np.nan  # single
        images.data[0, 4, 7] = np.nan  # diagonal neighbours
        images.data[0, 5, 8] = np.nan
        images.data[0, 7:9, 2:4] = np.nan  # 2x2 block
        images.data[0, 7:9, 6:9] = np.nan  # 2x3
        images.data[0, 12:15, 2:5] = np.nan  # 3x3
        self.assertTrue(np.any(np.isnan(images.data)))

        result = MedianFilter.filter_func(images.copy(),
                                          3,
                                          'reflect',
                                          force_cpu=use_cpu)

        npt.assert_equal(np.isnan(result.data), np.isnan(images.data))
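
The (self, _, use_cpu) signature suggests the original test carries a parameterized decorator that is not visible in this excerpt. With the `parameterized` package (an assumption), the pattern looks roughly like this:

import unittest
from parameterized import parameterized

class BackendSelectionExample(unittest.TestCase):
    @parameterized.expand([("CPU", True), ("GPU", False)])
    def test_backend_flag(self, _, use_cpu):
        # Each expanded case supplies a label (ignored) and the force_cpu flag.
        self.assertIsInstance(use_cpu, bool)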
Example #14
    def test_array_input_unchanged_when_gpu_runs_out_of_memory(self):
        """
        Mock the GPU running out of memory. Check that this leaves the input array unchanged.
        """
        import cupy as cp

        N = 200
        n_images = 2000
        size = 3
        mode = "reflect"

        images = th.generate_shared_array(shape=(n_images, N, N))

        with mock.patch(
                "mantidimaging.core.gpu.utility._send_single_array_to_gpu",
                side_effect=cp.cuda.memory.OutOfMemoryError(0, 0)):
            gpu_result = MedianFilter.filter_func(images,
                                                  size,
                                                  mode,
                                                  force_cpu=False)

        npt.assert_equal(gpu_result, images)
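
The mocked OutOfMemoryError mirrors what cupy raises when a device allocation fails. A sketch of the fallback behaviour the test implies (an assumption about the library's approach, not its actual code):

import cupy as cp

def filter_with_gpu_fallback(run_gpu, run_cpu, data):
    # Try the GPU path first; on an out-of-memory error leave the data untouched
    # and fall back to the CPU implementation.
    try:
        return run_gpu(data)
    except cp.cuda.memory.OutOfMemoryError:
        return run_cpu(data)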