Example #1
    def test_fwd_func_second_2d(self):
        # create data as shared array
        img = th.generate_shared_array()
        img2nd, orig_2nd = th.generate_shared_array_and_copy()

        img2nd = img2nd[0]

        # sanity check: the expected result differs from the original data
        expected = img + img2nd + 5
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = ptsm.create_partial(add_inplace,
                                fwd_function=ptsm.inplace_second_2d,
                                add_arg=5)

        # execute parallel
        ptsm.execute(img, img2nd, f)

        # compare results
        npt.assert_equal(img, expected)
        npt.assert_equal(img2nd, orig_2nd[0])
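
The add_inplace helper used above is defined elsewhere in the test module. Judging from the expected values (img + img2nd + 5), a minimal sketch of such a helper could look like the following; the exact signature and default are assumptions, not the project's definition.

import numpy as np

def add_inplace(first, second, add_arg=0):
    # Hypothetical sketch: add `second` plus a scalar into `first`, in place.
    # A 2D `second` broadcasts across the first axis of a 3D `first`.
    first += second + add_arg

# e.g. a 3D stack plus a 2D slice plus 5, written back into the stack:
stack = np.ones((2, 3, 3), dtype=np.float32)
slice_2d = np.full((3, 3), 2.0, dtype=np.float32)
add_inplace(stack, slice_2d, add_arg=5)  # every element of stack is now 8.0
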
Example #2
    def test_memory_fwd_func_inplace(self):
        # create data as shared array
        img = th.generate_shared_array()
        img2nd, orig_2nd = th.generate_shared_array_and_copy()

        # sanity check: the expected result differs from the original data
        expected = img + img2nd + 5
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = ptsm.create_partial(add_inplace,
                                fwd_function=ptsm.inplace,
                                add_arg=5)

        cached_memory = get_memory_usage_linux(kb=True)[0]
        # execute parallel
        ptsm.execute(img, img2nd, f)
        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
        # compare results
        npt.assert_equal(img, expected)
        npt.assert_equal(img2nd, orig_2nd)
Example #3
    def test_memory_return_to_second(self):
        # create data as shared array
        img, orig_img = th.generate_shared_array_and_copy()
        img2nd = th.generate_shared_array()

        # sanity check: the expected result differs from the original data
        expected = img + img2nd + 5
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = ptsm.create_partial(return_from_func,
                                fwd_function=ptsm.return_to_second,
                                add_arg=5)

        # execute parallel
        cached_memory = get_memory_usage_linux(kb=True)[0]
        res1, res2 = ptsm.execute(img, img2nd, f)
        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
        # compare results
        npt.assert_equal(res2, expected)
        npt.assert_equal(res1, orig_img)
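
return_from_func is another test helper; given the expected result (img + img2nd + 5) and the return_to_second forwarding function, it evidently returns a new value rather than mutating its inputs. A hedged sketch with an assumed signature:

def return_from_func(first, second, add_arg=0):
    # Hypothetical sketch: compute and return the result, leaving it to the
    # fwd_function (return_to_first / return_to_second) to decide which shared
    # array receives the output.
    return first + second + add_arg
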
Example #4
    def test_double_is_used_in_cuda_for_float_64_arrays(self):
        """
        Run the median filter on the CPU and GPU with a float64 array. This demonstrates that replacing instances of
        'float' with 'double' in the CUDA file is doing the right thing.
        """
        size = 3
        mode = "reflect"
        images = th.generate_shared_array(dtype="float64")

        gpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=False)
        cpu_result = self.run_serial(images.copy(), size, mode)

        npt.assert_almost_equal(gpu_result, cpu_result)
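
The docstring above refers to the GPU utility substituting 'double' for 'float' in the CUDA source when the input is float64. Conceptually this amounts to something like the sketch below; the function name and where it is called from are hypothetical.

import numpy as np

def adapt_cuda_source(source: str, dtype) -> str:
    # Hypothetical sketch: compile double-precision kernels for float64 input,
    # single-precision kernels otherwise.
    return source.replace("float", "double") if np.dtype(dtype) == np.float64 else source
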
Example #5
    def test_gpu_result_matches_cpu_result_for_different_filter_sizes(self):
        """
        Run the median filter on the CPU and GPU with different filter sizes. Check that the results match.
        """
        mode = "reflect"
        for size in [5, 7, 9]:
            with self.subTest(size=size):

                images = th.generate_shared_array()

                gpu_result = MedianFilter.filter_func(images.copy(),
                                                      size,
                                                      mode,
                                                      force_cpu=False)
                cpu_result = self.run_serial(images.copy(), size, mode)

                npt.assert_almost_equal(gpu_result, cpu_result)
Example #6
    def test_gpu_result_matches_cpu_result_for_larger_images(self):
        """
        Run the median filter on the CPU and GPU with a larger image size. Check that the results match. This test
        may reveal issues such as incorrect grid and dimension size arguments.
        """
        N = 1200
        size = 3
        mode = "reflect"

        images = th.generate_shared_array(shape=(20, N, N))

        gpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=False)
        cpu_result = self.run_serial(images.copy(), size, mode)

        npt.assert_almost_equal(gpu_result, cpu_result)
Example #7
    def test_numpy_pad_modes_match_scipy_median_modes(self):
        """
        Run the median filter on the GPU and CPU with the different scipy modes. Check that the results match.
        This should demonstrate that the arguments passed to numpy.pad are the correct equivalents of the scipy modes.
        """
        size = 3
        for mode in modes():
            with self.subTest(mode=mode):

                images = th.generate_shared_array()

                gpu_result = MedianFilter.filter_func(images.copy(),
                                                      size,
                                                      mode,
                                                      force_cpu=False)
                cpu_result = self.run_serial(images.copy(), size, mode)

                npt.assert_almost_equal(gpu_result[0], cpu_result[0])
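
As an independent illustration (not MantidImaging's mapping code), the usual correspondence between scipy.ndimage boundary modes and numpy.pad modes can be checked with a small standalone script:

import numpy as np
from scipy.ndimage import median_filter

# Commonly used correspondence between scipy.ndimage modes and numpy.pad modes.
SCIPY_TO_NUMPY_PAD = {
    "reflect": "symmetric",
    "constant": "constant",
    "nearest": "edge",
    "mirror": "reflect",
    "wrap": "wrap",
}

a = np.arange(10, dtype=np.float32)
size = 3
for scipy_mode, pad_mode in SCIPY_TO_NUMPY_PAD.items():
    padded = np.pad(a, size // 2, mode=pad_mode)
    manual = np.array([np.median(padded[i:i + size]) for i in range(a.size)])
    np.testing.assert_almost_equal(manual, median_filter(a, size=size, mode=scipy_mode))
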
Example #8
    def test_gpu_running_out_of_memory_causes_free_memory_to_be_called(self):
        """
        Mock the GPU running out of memory. Check that this causes the free-memory-pool function to be called.
        """
        import cupy as cp

        N = 20
        n_images = 2

        images = th.generate_shared_array(shape=(n_images, N, N))

        with mock.patch(
                "mantidimaging.core.gpu.utility._send_single_array_to_gpu",
                side_effect=cp.cuda.memory.OutOfMemoryError(0, 0)):
            with mock.patch("mantidimaging.core.gpu.utility._free_memory_pool"
                            ) as mock_free_gpu:
                gpu._send_arrays_to_gpu_with_pinned_memory(
                    images, [cp.cuda.Stream() for _ in range(n_images)])

        mock_free_gpu.assert_called()
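
The test above only checks behaviour through mocks. Internally, the utility presumably wraps the transfers in a try/except; a rough, hypothetical sketch of that pattern follows. Only the two patched names come from the test; the function name, the per-array transfer signature, and the return convention are assumptions.

import cupy as cp

def send_arrays_to_gpu(arrays, streams):
    # Hypothetical sketch: if any transfer raises OutOfMemoryError, release the
    # memory pool and signal failure instead of propagating the exception.
    # _send_single_array_to_gpu and _free_memory_pool are the module-private
    # functions patched in the test above.
    gpu_arrays = []
    try:
        for array, stream in zip(arrays, streams):
            gpu_arrays.append(_send_single_array_to_gpu(array, stream))
    except cp.cuda.memory.OutOfMemoryError:
        _free_memory_pool()
        return []
    return gpu_arrays
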
Example #9
    def test_image_slicing_works(self):
        """
        Run the median filter on the CPU and GPU with an image stack that is larger than the limit permitted on the GPU.
        This demonstrates that the algorithm for slicing the stack and overwriting GPU arrays is working correctly.
        """
        N = 30
        size = 3
        mode = "reflect"

        # Make the number of images in the stack exceed the maximum number of GPU-stored images
        n_images = gpu.MAX_GPU_SLICES * 3

        images = th.generate_shared_array(shape=(n_images, N, N))

        gpu_result = MedianFilter.filter_func(images.copy(),
                                              size,
                                              mode,
                                              force_cpu=False)
        cpu_result = self.run_serial(images.copy(), size, mode)

        npt.assert_almost_equal(gpu_result, cpu_result)
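
The slicing the docstring refers to can be pictured as splitting the stack into chunks of at most gpu.MAX_GPU_SLICES images and reusing the same GPU arrays for each chunk. A simplified, hypothetical illustration of the chunking arithmetic:

def chunk_bounds(n_images, max_slices):
    # Hypothetical helper: (start, end) index pairs with at most max_slices
    # images per chunk.
    return [(start, min(start + max_slices, n_images))
            for start in range(0, n_images, max_slices)]

# e.g. chunk_bounds(300, 100) -> [(0, 100), (100, 200), (200, 300)]
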
Example #10
    def test_array_input_unchanged_when_gpu_runs_out_of_memory(self):
        """
        Mock the GPU running out of memory. Check that this leaves the input array unchanged.
        """
        import cupy as cp

        N = 200
        n_images = 2000
        size = 3
        mode = "reflect"

        images = th.generate_shared_array(shape=(n_images, N, N))

        with mock.patch(
                "mantidimaging.core.gpu.utility._send_single_array_to_gpu",
                side_effect=cp.cuda.memory.OutOfMemoryError(0, 0)):
            gpu_result = MedianFilter.filter_func(images,
                                                  size,
                                                  mode,
                                                  force_cpu=False)

        npt.assert_equal(gpu_result, images)
Example #11
    def test_return_to_first(self):
        # create data as shared array
        img = th.generate_shared_array()
        img2nd, orig_2nd = th.generate_shared_array_and_copy()

        # sanity check: the expected result differs from the original data
        expected = img + img2nd + 5
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = ptsm.create_partial(return_from_func,
                                fwd_function=ptsm.return_to_first,
                                add_arg=5)

        # execute parallel
        res1, res2 = ptsm.execute(img, img2nd, f)

        # compare results
        npt.assert_equal(res1, expected)
        npt.assert_equal(res2, orig_2nd)