def test_assert_mem_usage_factor(self):
        """Exercise the assert_mem_usage_factor decorator factory.

        Three cases:
        1. A generous factor (100x) must pass without raising.
        2. A tight factor (1x), comparison array given by positional index,
           must raise the expected memory AssertionError.
        3. Same as 2, but with the comparison array given by keyword name.
        """
        a = np.ones((1000, 1000), dtype=np.uint8)

        # Case 1: generous limit -- must not raise.
        checked_dumb_function = assert_mem_usage_factor(
            100.0, comparison_input_arg='input_array')(dumb_function)
        checked_dumb_function(2, 3, 4, input_array=a)

        # Case 2: tight limit, comparison array identified by positional index.
        # (The original code had a redundant bare "except: raise" clause here,
        # which is a no-op; it has been removed.)
        checked_dumb_function = assert_mem_usage_factor(
            1.0, comparison_input_arg=3)(dumb_function)
        try:
            checked_dumb_function(2, 3, 4, a)
        except AssertionError as ex:
            # Re-raise anything other than the expected memory assertion.
            if 'memory' not in ex.args[0]:
                raise
        else:
            assert False, "Expected assertion wasn't raised."

        # Case 3: tight limit, comparison array identified by keyword name.
        checked_dumb_function = assert_mem_usage_factor(
            1.0, comparison_input_arg='input_array')(dumb_function)
        try:
            checked_dumb_function(2, 3, 4, input_array=a)
        except AssertionError as ex:
            if 'memory' not in ex.args[0]:
                raise
        else:
            assert False, "Expected assertion wasn't raised."
    def test_assert_mem_usage_factor(self):
        """Exercise the assert_mem_usage_factor decorator factory.

        Three cases:
        1. A generous factor (100x) must pass without raising.
        2. A tight factor (1x), comparison array given by positional index,
           must raise the expected memory AssertionError.
        3. Same as 2, but with the comparison array given by keyword name.
        """
        a = np.ones((1000, 1000), dtype=np.uint8)

        # Case 1: generous limit -- must not raise.
        checked_dumb_function = assert_mem_usage_factor(
            100.0, comparison_input_arg='input_array')(dumb_function)
        checked_dumb_function(2, 3, 4, input_array=a)

        # Case 2: tight limit, comparison array identified by positional index.
        # (The original code had a redundant bare "except: raise" clause here,
        # which is a no-op; it has been removed.)
        checked_dumb_function = assert_mem_usage_factor(
            1.0, comparison_input_arg=3)(dumb_function)
        try:
            checked_dumb_function(2, 3, 4, a)
        except AssertionError as ex:
            # Re-raise anything other than the expected memory assertion.
            if 'memory' not in ex.args[0]:
                raise
        else:
            assert False, "Expected assertion wasn't raised."

        # Case 3: tight limit, comparison array identified by keyword name.
        checked_dumb_function = assert_mem_usage_factor(
            1.0, comparison_input_arg='input_array')(dumb_function)
        try:
            checked_dumb_function(2, 3, 4, input_array=a)
        except AssertionError as ex:
            if 'memory' not in ex.args[0]:
                raise
        else:
            assert False, "Expected assertion wasn't raised."
Esempio n. 3
0
    def test_memory_usage(self):
        """Run each pipeline stage under its expected peak-RAM budget."""
        # Zero out a slab so the volume contains an empty region.
        vol = self.grayscale.copy()
        vol[:100] = 0

        # Wrap every stage in the RAM-checking decorator before calling it.
        checked_find = assert_mem_usage_factor(6.1)(find_large_empty_regions)
        mask = checked_find(vol)

        checked_predict = assert_mem_usage_factor(4.1)(naive_membrane_predictions)
        pred = checked_predict(vol, mask)

        checked_normalize = assert_mem_usage_factor(3.1)(normalize_channels_in_place)
        checked_normalize(pred)

        checked_watershed = assert_mem_usage_factor(2.1)(seeded_watershed)
        supervoxels = checked_watershed(pred, mask)
    def test_memory_usage(self):
        """Verify wsDtSegmentation stays within its expected RAM budget."""
        pmap = self._gen_input_data(3)

        # Wrap the segmentation function in the RAM-checking decorator.
        checked_ws = assert_mem_usage_factor(2.5)(wsDtSegmentation)
        labels = checked_ws(pmap, 0.5, 0, 10, 0.1, 0.1, groupSeeds=False)
        assert labels.max() == 8

        # Repeat with groupSeeds=True, which gets a larger budget.
        # Note: This is a best-case scenario for memory usage, since the memory
        #       usage of the seed-grouping function depends on the NUMBER of
        #       seeds, and we have very few seeds in this test.
        checked_ws = assert_mem_usage_factor(3.5)(wsDtSegmentation)
        labels = checked_ws(pmap, 0.5, 0, 10, 0.1, 0.1, groupSeeds=True)
        assert labels.max() == 8
    def test_c_contiguous(self):
        """Round-trip a C-contiguous float32 volume through compression."""
        data = np.random.random((100, 100, 100)).astype(np.float32)
        assert data.flags['C_CONTIGUOUS']

        # RAM is allocated for the lz4 buffers, but compression itself must
        # perform zero numpy array allocations.
        compressed = assert_mem_usage_factor(0.0)(CompressedNumpyArray)(data)

        # Deserialization allocates nothing beyond the result array itself.
        checked_deserialize = assert_mem_usage_factor(
            1.0, comparison_input_arg=data)(compressed.deserialize)
        restored = checked_deserialize()

        assert restored.flags['C_CONTIGUOUS']
        assert (restored == data).all()
    def test_non_contiguous(self):
        """Round-trip a non-contiguous (transposed) volume through compression."""
        data = np.random.random((100, 100, 100)).astype(np.float32)
        data = data.transpose(1, 2, 0)
        assert not data.flags.contiguous

        # A non-contiguous array must be copied during compression, but only
        # one slice's worth of overhead is allowed.
        compressed = assert_mem_usage_factor(0.01)(CompressedNumpyArray)(data)

        # Decompression still needs no numpy allocations beyond the result.
        checked_deserialize = assert_mem_usage_factor(
            1.0, comparison_input_arg=data)(compressed.deserialize)
        restored = checked_deserialize()

        assert (restored == data).all()
    def test_1d_array(self):
        """
        1D arrays were unsupported by an earlier CompressedNumpyArray;
        they are now handled as a special case -- verify the round-trip.
        """
        data = np.random.random((1000, )).astype(np.float32)

        # lz4 buffers are allowed, but zero numpy array allocations.
        compressed = assert_mem_usage_factor(0.0)(CompressedNumpyArray)(data)

        # Deserialization allocates nothing beyond the result array itself.
        checked_deserialize = assert_mem_usage_factor(
            1.0, comparison_input_arg=data)(compressed.deserialize)
        restored = checked_deserialize()

        assert (restored == data).all()
    def test_boolean_nonblocks(self):
        """
        CompressedNumpyArray uses special compression for binary images (bool).
        """
        # Fixes vs. original:
        #  * np.bool was removed from NumPy; the builtin bool is the dtype now.
        #  * randint's `high` bound is exclusive, so (0, 1) produced an
        #    all-False array; (0, 2) yields the intended random binary data.
        original = np.random.randint(0, 2, (100, 100, 100), bool)
        assert original.flags['C_CONTIGUOUS']

        # Some copying is required, especially for non-block aligned data.
        compress = assert_mem_usage_factor(3.0)(CompressedNumpyArray)
        compressed = compress(original)

        # Some copying is required, especially for non-block aligned data.
        uncompress = assert_mem_usage_factor(5.0,
                                             comparison_input_arg=original)(
                                                 compressed.deserialize)
        uncompressed = uncompress()

        assert uncompressed.flags['C_CONTIGUOUS']
        assert (uncompressed == original).all()
    def test_uint64_nonblocks(self):
        """
        CompressedNumpyArray uses special compression for labels (uint64).
        Non-aligned data is handled somewhat clumsily, so the RAM
        requirements are higher than for the aligned case.
        """
        # Shape (100,100,100) is deliberately NOT 64px block-aligned.
        labels = np.random.randint(1, 100, (100, 100, 100), np.uint64)
        assert labels.flags['C_CONTIGUOUS']

        # Copies are needed during compression.
        compressed = assert_mem_usage_factor(3.0)(CompressedNumpyArray)(labels)

        checked_deserialize = assert_mem_usage_factor(
            3.0, comparison_input_arg=labels)(compressed.deserialize)
        restored = checked_deserialize()

        assert (restored == labels).all()
    def test_uint64_blocks(self):
        """
        CompressedNumpyArray uses special compression for labels (uint64),
        as long as they are aligned to 64px sizes.
        """
        labels = np.random.randint(1, 100, (128, 128, 128), np.uint64)
        assert labels.flags['C_CONTIGUOUS']

        # Compressed buffers get their own RAM, but numpy allocations must be
        # small: each 64px block is copied once to a C-contiguous buffer as
        # it is compressed, hence the one-block budget (plus 1% fudge-factor).
        budget = (64.**3 / 128.**3) * 1.01
        compressed = assert_mem_usage_factor(budget)(CompressedNumpyArray)(labels)

        # Deserialization allocates no numpy arrays beyond the result itself.
        # (There are some copies on the C++ side, not reflected here.)
        checked_deserialize = assert_mem_usage_factor(
            1.0, comparison_input_arg=labels)(compressed.deserialize)
        restored = checked_deserialize()

        assert restored.flags['C_CONTIGUOUS']
        assert (restored == labels).all()
    def test_group_seeds_ram_usage(self):
        """
        The original groupSeeds implementation needed RAM scaling as N**2
        in the number of seeds.  The new implementation works in batches,
        so it needs far less.

        Build a test image that yields lots of seeds and verify that RAM
        usage stays under control.

        The image looks roughly like this (seeds marked with 'x'):

        +-----------------------------------------------------+
        |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
        |                                                     |
        |                                                     |
        |       x  x  x  x  x  x  x  x  x  x  x  x  x  x      |
        |                                                     |
        |                                                     |
        +-----------------------------------------------------+
        """
        img = np.zeros((101, 20001), dtype=np.float32)

        # Borders on all four sides.
        img[0] = 1
        img[-1] = 1
        img[:, 0] = 1
        img[:, -1] = 1

        # Tick marks hanging from the top edge.
        img[:10, ::10] = 1

        # Sanity check: without groupSeeds we should get a lot of segments.
        segmentation = wsDtSegmentation(img, 0.5, 0, 0, 0.0, 0.0,
                                        groupSeeds=False)
        assert segmentation.max() > 1900

        # Now verify RAM stays bounded with groupSeeds=True.
        checked_ws = assert_mem_usage_factor(3.0)(wsDtSegmentation)
        segmentation = checked_ws(img, 0.5, 0, 0, 2.0, 0.0, groupSeeds=True)
        assert segmentation.max() == 1
Esempio n. 12
0
 def test_mem_usage(self):
     """contingency_table must stay within a 20x RAM factor of its inputs."""
     left = np.random.randint(100, 200, size=(100, 100), dtype=np.uint32)
     right = np.random.randint(200, 300, size=(100, 100), dtype=np.uint32)
     checked_table = assert_mem_usage_factor(20)(contingency_table)
     _table = checked_table(left, right)
Esempio n. 13
0
 def test_mem_usage(self):
     """split_disconnected_bodies must stay within a 20x RAM factor."""
     labels = 1 + np.arange(1000**2, dtype=np.uint32).reshape((1000, 1000)) // 10
     checked_split = assert_mem_usage_factor(20)(split_disconnected_bodies)
     split, mapping = checked_split(labels)
     # Nothing should be split: output equals input, mapping is empty.
     assert (labels == split).all()
     assert mapping == {}