Example 1
    def test_memory_fwd_func_inplace(self):
        # create data as shared array
        img = th.generate_shared_array()
        img2nd, orig_2nd = th.generate_shared_array_and_copy()

        # make sure computing the expected result hasn't changed the original array
        expected = img + img2nd + 5
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = ptsm.create_partial(add_inplace,
                                fwd_function=ptsm.inplace,
                                add_arg=5)

        cached_memory = get_memory_usage_linux(kb=True)[0]
        # execute parallel
        ptsm.execute(img, img2nd, f)
        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
        # compare results
        npt.assert_equal(img, expected)
        npt.assert_equal(img2nd, orig_2nd)
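
The add_inplace helper used above is referenced but not defined in this listing. A minimal sketch of what it presumably does for the two-shared-array (ptsm) case, assuming it simply adds the second slice and the extra argument into the first slice in place:

import numpy as np

def add_inplace(first, second, add_arg=0):
    # Hypothetical helper, not shown in the original listing: add `second`
    # and `add_arg` into `first`, modifying the shared array in place so
    # that no large new allocation is made.
    np.add(first, second, out=first)
    first += add_arg

This matches what the test asserts: img ends up holding the expected result while img2nd is left untouched.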
Example 2
    def test_memory_return_to_second(self):
        # create data as shared array
        img, orig_img = th.generate_shared_array_and_copy()
        img2nd = th.generate_shared_array()

        # make sure computing the expected result hasn't changed the original array
        expected = img + img2nd + 5
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = ptsm.create_partial(return_from_func,
                                fwd_function=ptsm.return_to_second,
                                add_arg=5)

        # execute parallel
        cached_memory = get_memory_usage_linux(kb=True)[0]
        res1, res2 = ptsm.execute(img, img2nd, f)
        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
        # compare results
        npt.assert_equal(res2, expected)
        npt.assert_equal(res1, orig_img)
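
return_from_func and the ptsm.return_to_second forwarder are likewise only referenced here. A hedged sketch of the helper, assuming it computes the result out of place and leaves both inputs alone so the forwarder can write the return value into the second shared array:

def return_from_func(first, second, add_arg=0):
    # Hypothetical helper, not shown in the original listing: compute the
    # result without modifying either input and return it. The
    # ptsm.return_to_second forwarder is then expected to store the returned
    # slice in the second shared array, which is why res1 stays equal to
    # orig_img while res2 matches expected.
    return first + second + add_arg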
Example 3
    def test_memory_fwd_func_inplace(self):
        # create data as shared array
        img, _ = th.generate_shared_array_and_copy()
        add_arg = 5

        expected = img + add_arg
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = psm.create_partial(add_inplace,
                               fwd_func=psm.inplace,
                               add_arg=add_arg)

        cached_memory = get_memory_usage_linux(kb=True)[0]
        # execute parallel
        img = psm.execute(img, f)

        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)

        # compare results
        npt.assert_equal(img, expected)
Example 4
    def test_memory_change_acceptable(self):
        """
        Expected behaviour for the filter is to be done in place
        without using more memory.

        In reality the memory is increased by about 40MB (4 April 2017),
        but this could change in the future.

        The reason why a 10% window is given on the expected size is
        to account for any library imports that may happen.

        This will still capture if the data is doubled, which is the main goal.
        """
        images = th.generate_images()

        cached_memory = get_memory_usage_linux(kb=True)[0]

        ClipValuesFilter().filter_func(images,
                                       clip_min=0.2,
                                       clip_max=0.8,
                                       clip_min_new_value=0.1,
                                       clip_max_new_value=0.9)

        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
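
Every test in this listing repeats the same snapshot-then-assert pattern around get_memory_usage_linux. Purely as an illustration of the 10% window described in the docstring above (this helper is not part of the actual test suite), the pattern could be wrapped in a small context manager:

from contextlib import contextmanager

@contextmanager
def memory_within(test_case, factor=1.1, kb=True):
    # Illustrative sketch only: snapshot the process memory before the
    # wrapped block runs and assert afterwards that usage stayed below
    # `factor` times the snapshot, mirroring the 10% window used in the
    # tests above. Assumes the same get_memory_usage_linux helper the
    # tests import.
    cached_memory = get_memory_usage_linux(kb=kb)[0]
    yield
    test_case.assertLess(get_memory_usage_linux(kb=kb)[0],
                         cached_memory * factor)

A test body would then simply wrap the filter call, for example: with memory_within(self): ClipValuesFilter().filter_func(images, clip_min=0.2, clip_max=0.8, clip_min_new_value=0.1, clip_max_new_value=0.9)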
Example 5
    def test_memory_fwd_func(self):
        """
        Expected behaviour for the filter is to be done in place
        without using more memory.

        In reality the memory is increased by about 40MB (4 April 2017),
        but this could change in the future.

        The reason why a 10% window is given on the expected size is
        to account for any library imports that may happen.

        This will still capture if the data is doubled, which is the main goal.
        """
        # create data as shared array
        img, _ = th.generate_shared_array_and_copy()
        add_arg = 5

        expected = img + add_arg
        assert expected[0, 0, 0] != img[0, 0, 0]
        assert expected[1, 0, 0] != img[1, 0, 0]
        assert expected[0, 4, 0] != img[0, 4, 0]
        assert expected[6, 0, 1] != img[6, 0, 1]

        # create partial
        f = psm.create_partial(return_from_func,
                               fwd_func=psm.return_fwd_func,
                               add_arg=add_arg)

        cached_memory = get_memory_usage_linux(kb=True)[0]
        # execute parallel
        img = psm.execute(img, f)
        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)

        # compare results
        npt.assert_equal(img, expected)
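
Examples 3 and 5 use single-shared-array (psm) variants of the same helpers. Sketches under the same assumption, with one data slice plus the add_arg keyword (again hypothetical, since the originals are not shown):

def add_inplace(data, add_arg=0):
    # Hypothetical single-array variant assumed by Example 3: modify the
    # shared slice in place.
    data += add_arg

def return_from_func(data, add_arg=0):
    # Hypothetical single-array variant assumed by Example 5: return the
    # result so that psm.return_fwd_func can assign it back into the
    # shared array.
    return data + add_arg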
Example 6
    def test_memory_change_acceptable(self):
        """
        Expected behaviour for the filter is to be done in place
        without using more memory.

        In reality the memory is increased by about 40MB (4 April 2017),
        but this could change in the future.

        The reason why a 10% window is given on the expected size is
        to account for any library imports that may happen.

        This will still capture if the data is doubled, which is the main goal.
        """
        images = th.generate_images()
        roi = SensibleROI.from_list([1, 1, 5, 5])

        cached_memory = get_memory_usage_linux(mb=True)[0]

        result = CropCoordinatesFilter.filter_func(images, roi)

        self.assertLess(
            get_memory_usage_linux(mb=True)[0], cached_memory * 1.1)

        expected_shape = (10, 4, 4)

        npt.assert_equal(result.data.shape, expected_shape)
Example 7
    def test_memory_change_acceptable(self):
        images = th.generate_images()
        # skip the actual ring removal; this test only checks memory usage
        run_ring_removal = False

        cached_memory = get_memory_usage_linux(kb=True)[0]

        RingRemovalFilter.filter_func(images, run_ring_removal, cores=1)

        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
Example 8
    def test_memory_change_acceptable(self):
        """
        This filter will increase the memory usage as it has to allocate memory
        for the new resized shape
        """
        images = th.generate_images()

        mode = 'reflect'
        # This value roughly doubles the memory usage (found by running the test)
        val = 100.

        expected_x = int(images.data.shape[1] * val)
        expected_y = int(images.data.shape[2] * val)

        cached_memory = get_memory_usage_linux(kb=True)[0]

        result = RebinFilter.filter_func(images, val, mode)

        self.assertLess(get_memory_usage_linux(kb=True)[0], cached_memory * 2)

        npt.assert_equal(result.data.shape[1], expected_x)
        npt.assert_equal(result.data.shape[2], expected_y)
Example 9
    def test_memory_change_acceptable(self):
        """
        Expected behaviour for the filter is to be done in place
        without using more memory.

        In reality the memory is increased by about 40MB (4 April 2017),
        but this could change in the future.

        The reason why a 10% window is given on the expected size is
        to account for any library imports that may happen.

        This will still capture if the data is doubled, which is the main goal.
        """
        images = th.generate_images()

        cached_memory = get_memory_usage_linux(kb=True)[0]
        original = np.copy(images.data)

        result = MinusLogFilter.filter_func(images, minus_log=True)

        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
        th.assert_not_equals(result.data, original)
Example 10
    def test_memory_change_acceptable(self):
        """
        Expected behaviour for the filter is to be done in place
        without using more memory.

        In reality the memory is increased by about 40MB (4 April 2017),
        but this could change in the future.

        The reason why a 10% window is given on the expected size is
        to account for any library imports that may happen.

        This will still capture if the data is doubled, which is the main goal.
        """
        images = th.generate_images()
        size = 3
        mode = 'reflect'
        order = 1

        cached_memory = get_memory_usage_linux(kb=True)[0]

        GaussianFilter.filter_func(images, size, mode, order)

        self.assertLess(get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
Example 11
    def test_memory_change_acceptable(self):
        """
        Expected behaviour for the filter is to be done in place
        without using more memory.

        In reality the memory is increased by about 40MB (4 April 2017),
        but this could change in the future.

        The reason why a 10% window is given on the expected size is
        to account for any library imports that may happen.

        This will still capture if the data is doubled, which is the main goal.
        """
        # only works on square images
        images = th.generate_images((10, 10, 10))
        rotation = 1  # once clockwise
        images.data[:, 0, 0] = 42  # set all images at 0,0 to 42

        cached_memory = get_memory_usage_linux(kb=True)[0]

        RotateFilter.filter_func(images, rotation)

        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
Example 12
    def test_memory_executed_sf(self):
        sf = ['size=5']
        cached_memory = get_memory_usage_linux(kb=True)[0]
        self.do_stripe_removal(sf=sf)
        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
Example 13
    def test_memory_executed_wf(self):
        wf = ["level=1"]
        cached_memory = get_memory_usage_linux(kb=True)[0]
        self.do_stripe_removal(wf=wf)
        self.assertLess(
            get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)
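
The sf and wf arguments in the last two examples are lists of key=value strings (presumably stripe-filter and wavelet-Fourier parameters, respectively) that do_stripe_removal parses before calling the stripe removal filter. A purely hypothetical parsing step, only to illustrate the option format:

def parse_params(params):
    # Hypothetical illustration of how option strings such as "size=5" or
    # "level=1" could be turned into keyword arguments; the real
    # do_stripe_removal helper is not shown in this listing.
    parsed = {}
    for entry in params or []:
        key, value = entry.split("=", 1)
        parsed[key] = int(value) if value.isdigit() else value
    return parsed

For example, parse_params(['size=5']) gives {'size': 5}.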