Example #1
    def test_normalize(self):
        print('\ntest chunk normalization...')
        arr = np.ones((3, 3, 3), dtype='float32')
        voxel_offset = (-1, -1, -1)
        chunk = Chunk(arr, voxel_offset=voxel_offset)

        mask = np.ones((3, 3, 3), dtype='float32') * 0.5
        mask = Chunk(mask, voxel_offset=voxel_offset)
        chunk *= mask
        self.assertTrue(np.all(chunk == mask))
Example #2
    def __call__(self, inputs: list, args: str = None):
        voxel_offset = None
        voxel_size = None
        shape = None
        for inp in inputs:
            if isinstance(inp, Chunk):
                voxel_offset = inp.voxel_offset
                voxel_size = inp.voxel_size
                shape = inp.shape
                break

        if len(inputs) == 0 and args is None:
            outputs = self.execute()
        elif len(inputs) == 0 and args is not None:
            outputs = self.execute(args=args)
        elif len(inputs) > 0 and args is None:
            outputs = self.execute(*inputs)
        else:
            outputs = self.execute(*inputs, args=args)

        # automatically convert the ndarrays to Chunks
        if voxel_offset is not None and outputs is not None:
            assert shape is not None
            for idx, output in enumerate(outputs):
                if isinstance(output, np.ndarray):
                    # in case the plugin did some symmetric cropping
                    offset = tuple(
                        vo + (ins - outs) // 2 for vo, ins, outs in zip(
                            voxel_offset, shape[-3:], output.shape[-3:]))
                    outputs[idx] = Chunk(output,
                                         voxel_offset=offset,
                                         voxel_size=voxel_size)

        return outputs
Example #3
    def quantize(self, mode: str = 'xy'):
        """transform affinity map to gray scale image

        Args:
            mode (str, optional): transformation mode. Defaults to 'xy'.

        Raises:
            ValueError: only modes 'xy' and 'z' are supported.

        Returns:
            Chunk: the gray scale image chunk
        """
        if mode == 'z':
            # only use the last channel; it is the Z affinity
            # if this is an affinity map
            image = self[-1, :, :, :]
        elif mode == 'xy':
            image = (self[0, ...] + self[1, ...]) / 2.
        else:
            raise ValueError(f'only support xy and z mode, but got {mode}')

        image = (image * 255.).astype(np.uint8)
        image = Chunk(image)
        image.set_properties(self.properties)
        # assert np.issubdtype(image.dtype, np.uint8)
        return image
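A minimal usage sketch for the quantize method above, assuming it is defined on the same Chunk class shown throughout these examples and that the affinity values lie in [0, 1]; the shape here is hypothetical:

import numpy as np

# hypothetical 3-channel (x, y, z) affinity map with values in [0, 1]
affs = Chunk(np.random.rand(3, 8, 64, 64).astype(np.float32))

image = affs.quantize(mode='xy')  # average of the x and y channels
assert image.dtype == np.uint8    # rescaled to the 0-255 gray scale range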
Example #4
def test_non_aligned_input_chunk():
    print('\ntest non-aligned block inference engine...')
    patch_size = (32, 256, 256)
    patch_overlap = (4, 64, 64)
    num_output_channels = 2
    input_size = (28 * 2 + 4 + 6, (256 - 64) * 2 + 64 + 7,
                  (256 - 64) * 2 + 64 + 9)
    image = np.random.randint(1, 255, size=input_size, dtype=np.uint8)
    image = Chunk(image)

    with Inferencer(None,
                    None,
                    patch_size,
                    output_patch_overlap=patch_overlap,
                    num_output_channels=num_output_channels,
                    batch_size=5,
                    framework='identity',
                    mask_output_chunk=True) as inferencer:
        output = inferencer(image)

    # only use the first channel to check correctness
    output = output[0, :, :, :]
    #output = np.reshape(output, image.shape)

    image = image.astype(np.float32) / 255
    residual = image - output
    print('maximum difference: ', np.max(residual.array))

    # some of the image voxels are 0, so a relative tolerance alone cannot
    # pass there; atol covers the zero voxels
    np.testing.assert_allclose(image, output, rtol=1e-5, atol=1e-5)
Example #5
    def test_downsample_upload(self):
        print('test downsample and upload...')
        # compute parameters
        mip = 0
        size = (16, 512, 512)

        # create image dataset using cloud-volume
        img = np.random.randint(np.iinfo(np.uint32).max, size=size)
        img = img.astype(np.uint32)
        chunk = Chunk(img, global_offset=[2, 32, 32])
        # save the input to disk
        volume_path = 'file:///tmp/test/cutout/' + generate_random_string()
        CloudVolume.from_numpy(np.transpose(img),
                               vol_path=volume_path,
                               voxel_offset=(32, 32, 2),
                               chunk_size=(32, 32, 4),
                               max_mip=4,
                               layer_type='segmentation')

        operator = DownsampleUploadOperator(volume_path,
                                            chunk_mip=0,
                                            start_mip=1,
                                            stop_mip=4)
        operator(chunk)
        shutil.rmtree('/tmp/test')
Example #6
def read_png_images(path_prefix: str, bbox: BoundingBox,
                    volume_offset: tuple = (0, 0, 0),
                    voxel_size: tuple = (1, 1, 1),
                    digit_num: int = 5,
                    dtype: np.dtype = np.uint8):
    
    chunk = Chunk.from_bbox(
        bbox, dtype=dtype, 
        pattern='zero', 
        voxel_size=voxel_size
    )
    assert len(bbox.minpt) == 3
    assert len(volume_offset) == 3

    for z in tqdm(range(bbox.minpt[0], bbox.maxpt[0])):
        file_name = f'{path_prefix}{z:0>{digit_num}d}.png'
        file_name = path.expanduser(file_name)
        if path.exists(file_name):
            with open(file_name, "rb") as f:
                img = pyspng.load(f.read())
            img = np.expand_dims(img, axis=0)
            img_chunk = Chunk(
                img,
                voxel_offset=(
                  z+volume_offset[0], volume_offset[1], volume_offset[2]),
                voxel_size=voxel_size
            )
            chunk.blend(img_chunk)
        else:
            logging.warning(f'image file does not exist: {file_name}')
    
    return chunk
Example #7
def read_png_images(path_prefix: str,
                    bbox: Bbox,
                    volume_offset: tuple = (0, 0, 0),
                    voxel_size: tuple = (1, 1, 1),
                    dtype: np.dtype = np.uint8):

    chunk = Chunk.from_bbox(bbox,
                            dtype=dtype,
                            all_zero=True,
                            voxel_size=voxel_size)
    assert len(bbox.minpt) == 3
    assert len(volume_offset) == 3

    for z in tqdm(range(bbox.minpt[0], bbox.maxpt[0])):
        file_name = '{}{:0>5d}.png'.format(path_prefix, z)
        file_name = path.expanduser(file_name)
        if path.exists(file_name):
            img = Image.open(file_name)
            img = np.asarray(img)
            img = np.expand_dims(img, axis=0)
            img_chunk = Chunk(img,
                              voxel_offset=(z + volume_offset[0],
                                            volume_offset[1],
                                            volume_offset[2]),
                              voxel_size=voxel_size)
            chunk.blend(img_chunk)
        else:
            logging.warning(f'image file does not exist: {file_name}')

    return chunk
Example #8
    def __call__(self, output_bbox):

        chunk_slices = tuple(
            slice(s.start - m, s.stop + m)
            for s, m in zip(output_bbox.to_slices(), self.expand_margin_size))
        if self.verbose:
            print('cutout {} from {}'.format(chunk_slices[::-1],
                                             self.volume_path))

        # always reverse the indexes since cloudvolume uses x,y,z indexing
        chunk = self.vol[chunk_slices[::-1]]
        # the cutout is Fortran ordered, so we need to transpose it to C order
        chunk = np.transpose(chunk)
        # we could do this transpose later;
        # actually we do not need to make it contiguous
        # chunk = np.ascontiguousarray(chunk)

        # if the channel number is 1, squeeze it to a 3d array
        # this should not be necessary
        # TODO: remove this step and use 4D arrays all over this package;
        # always using 4D arrays would simplify some operations
        global_offset = tuple(s.start for s in chunk_slices)
        if chunk.shape[0] == 1:
            chunk = np.squeeze(chunk, axis=0)
        else:
            global_offset = (chunk.shape[0], ) + global_offset

        chunk = Chunk(chunk, global_offset=global_offset)

        if self.blackout_sections:
            chunk = self._blackout_sections(chunk)

        if self.validate_mip:
            self._validate_chunk(chunk)
        return chunk
Example #9
    def __call__(self, affs: np.ndarray, fragments: np.ndarray = None):
        """
        Parameters:
        -----------
        affs: affinity map with 4 dimensions: channel, z, y, x
        """
        if isinstance(affs, Chunk):
            # the segmentation is 3d, so we only need the zyx
            global_offset = affs.global_offset[-3:]
        else:
            global_offset = None

        # our affinity map channel order is x,y,z!
        # meaning the first channel is x, the second is y,
        # the third is z. We have to reverse the channel.
        if self.flip_channel:
            affs = np.flip(affs, axis=0)
        
        # waterz needs dtype float32 and
        # a contiguous array memory layout
        affs = np.ascontiguousarray(affs, dtype=np.float32)

        # the output is a generator, and the computation is delayed
        seg_generator = agglomerate(
            affs, [self.threshold], fragments=fragments,
            aff_threshold_low=self.aff_threshold_low,
            aff_threshold_high=self.aff_threshold_high,
            scoring_function=self.scoring_function,
            force_rebuild=False)
        # there is only one threshold, so there is also only one result
        # in generator
        seg = next(seg_generator)
        return Chunk(seg, global_offset=global_offset)
Example #10
def execute(affs: Chunk, fragments: np.ndarray = None):
    """
    Mean/max agglomeration of affinity map including watershed step.

    Parameters:
    -----------
    affs: affinity map with 4 dimensions: channel, z, y, x
    """
    properties = affs.properties

    # our affinity map channel order is x,y,z!
    # meaning the first channel is x, the second is y,
    # the third is z. We have to reverse the channel for waterz.
    if flip_channel:
        affs = np.flip(affs, axis=0)
    
    # waterz needs dtype float32 and
    # a contiguous array memory layout
    affs = np.ascontiguousarray(affs, dtype=np.float32)

    # the output is a generator, and the computation is delayed
    seg_generator = agglomerate(
        affs, [threshold], fragments=fragments,
        aff_threshold_low=aff_threshold_low,
        aff_threshold_high=aff_threshold_high,
        scoring_function=scoring_function,
        force_rebuild=False)
    # there is only one threshold, so there is also only one result
    # in generator
    seg = next(seg_generator)
    seg = Chunk(seg)
    seg.set_properties(properties)

    return [seg]
Example #11
    def create_chunk_with_zeros(self, bbox, num_channels, dtype):
        """Create a fake all-zero chunk.
        This is used to skip some operations based on a mask."""
        shape = (num_channels, *bbox.size3())
        arr = np.zeros(shape, dtype=dtype)
        chunk = Chunk(arr, voxel_offset=(0, *bbox.minpt))
        return chunk
Example #12
def create_chunk(size: tuple = (7, 8, 9), global_offset=(-2, -3, -4),
                 dtype=np.float32):
    # make the tests consistent in multiple runs
    np.random.seed(1)
    arr = np.random.rand(*size).astype(dtype)
    chunk = Chunk(arr, global_offset)
    return chunk
Example #13
    def __call__(self, seg: Chunk):
        """Mask out selected objects in the segmentation chunk.
        
        Parameters
        ------------
        seg:
            3D segmentation chunk.
        """
        assert isinstance(seg, Chunk)
        assert seg.ndim == 3
        assert np.issubdtype(seg.dtype, np.integer)
        
        global_offset = seg.global_offset
        # use ndarray after getting the bounding box
        seg = seg.array

        seg = self._only_keep_selected(seg)
        if np.all(seg == 0):
            if self.verbose:
                print('no segmentation id is selected!')
            return
        seg = self._remove_dust(seg)
        
        seg = Chunk(seg, global_offset=global_offset)
        return seg
Example #14
    def test_where(self):
        arr = np.asarray([0.1, 0.7])
        selected1 = np.where(arr > 0.5)
        chunk = Chunk(arr, (-1, ))
        selected2 = chunk.where(chunk > 0.5)
        for i1, i2 in zip(selected1, selected2):
            # print('i1: {}, i2: {}'.format(i1, i2))
            self.assertTrue((i1 - i2 == 1).all())
Example #15
    def test_normalize(self):
        print('\ntest chunk normalization...')
        arr = np.ones((3, 3, 3), dtype='float32')
        chunk = Chunk(arr, (-1, -1, -1))

        mask = np.ones((3, 3, 3), dtype='float32') * 0.5
        chunk *= mask
        self.assertTrue(np.all(chunk == mask))
Example #16
def execute(seg: Chunk):
    properties = seg.properties
    array = fill_voids.fill(seg.array)
    seg2 = Chunk(array)
    seg2.set_properties(properties)
    return [
        seg2,
    ]
Example #17
def test_psd_map():
    print('test downsample and upload...')
    # compute parameters
    size = (16, 512, 512)

    # create image dataset using cloud-volume
    img = np.random.rand(*size).astype(np.float32)
    chunk = Chunk(img, voxel_offset=[2, 32, 32])
    hierarchical_downsample(chunk, layer_type='image')
Example #18
def test_image():
    print('test downsample and upload...')
    # compute parameters
    size = (16, 512, 512)

    # create image dataset using cloud-volume
    img = np.random.randint(np.iinfo(np.uint8).max, size=size, dtype=np.uint8)
    chunk = Chunk(img, voxel_offset=[2, 32, 32])
    hierarchical_downsample(chunk, layer_type='image')
Example #19
    def quantize(self):
        # only use the last channel; it is the Z affinity
        # if this is an affinity map
        image = self[-1, :, :, :]
        image = (image * 255).astype(np.uint8)
        image = Chunk(image,
                      voxel_offset=self.voxel_offset,
                      voxel_size=self.voxel_size)
        return image
Example #20
    def __new__(cls, array, **kwargs):
        if 'global_offset' in kwargs:
            # pop it so it is not passed twice in the call below
            global_offset = kwargs.pop('global_offset')
        elif isinstance(array, Chunk):
            global_offset = array.global_offset
        else:
            global_offset = None

        obj = Chunk(array, global_offset=global_offset, **kwargs).view(cls)
        return obj
Example #21
def test_maskout():
    mask = np.ones((8, 16, 4), dtype=np.uint32)
    mask[:4, :8, :2] = 0
    mask = Chunk(mask, voxel_offset=(-2, -3, -4), voxel_size=(2, 4, 8))
    image = create_chunk(size=(16, 64, 32), voxel_size=(1, 1, 1), dtype=np.uint8)
    mask.maskout(image)
    np.testing.assert_array_equal(image.array[:8, :32, :16], 0)

    affs = create_chunk(size=(3, 16, 64, 32), voxel_size=(1, 1, 1), dtype=np.float32)
    mask.maskout(affs)
    np.testing.assert_array_equal(affs.array[:, :8, :32, :16], 0)
Example #22
def array_to_chunk(arr: Union[np.ndarray, Chunk], voxel_offset: Cartesian,
                   voxel_size: Cartesian, shape: tuple):
    if isinstance(arr, np.ndarray):
        # in case the plugin did some symmetric cropping
        offset = tuple(
            vo + (ins - outs) // 2
            for vo, ins, outs in zip(voxel_offset, shape[-3:], arr.shape[-3:]))
        return Chunk(arr, voxel_offset=offset, voxel_size=voxel_size)
    elif isinstance(arr, Chunk):
        return arr
    else:
        raise TypeError(f'only support ndarray and Chunk, but got {type(arr)}')
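The offset arithmetic above can be checked with a small worked example; the concrete numbers are hypothetical and the voxel_offset is written as a plain tuple:

import numpy as np

# the plugin received a 100-voxel cube at offset (10, 20, 30) and
# returned a symmetrically cropped 92-voxel cube as a plain ndarray
arr = np.zeros((92, 92, 92), dtype=np.float32)
chunk = array_to_chunk(arr,
                       voxel_offset=(10, 20, 30),
                       voxel_size=(1, 1, 1),
                       shape=(100, 100, 100))
# each axis lost (100 - 92) // 2 = 4 voxels on each side
assert tuple(chunk.voxel_offset) == (14, 24, 34)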
Example #23
    def __call__(self, output_bbox: BoundingBox):
        # if we do not clone this bounding box,
        # the bounding box in the task will be modified!
        assert isinstance(output_bbox, BoundingBox)
        output_bbox = output_bbox.clone()
        output_bbox.adjust(self.expand_margin_size)
        chunk_slices = output_bbox.to_slices()

        if self.dry_run:
            # input_bbox = BoundingBox.from_slices(chunk_slices)
            # we cannot use pattern=zero since it might get skipped by
            # the skip-all-zero operator
            return Chunk.from_bbox(
                output_bbox,
                pattern='random',
                dtype=self.vol.dtype,
                voxel_size=Cartesian.from_collection(
                    self.vol.resolution[::-1]),
            )

        logging.info('cutout {} from {}'.format(chunk_slices[::-1],
                                                self.volume_path))

        # always reverse the indexes since cloudvolume uses x,y,z indexing
        chunk = self.vol[chunk_slices[::-1]]
        chunk = np.asarray(chunk)
        # the cutout is Fortran ordered, so we need to transpose it to C order
        chunk = chunk.transpose()

        # we could do this transpose later;
        # actually we do not need to make it contiguous
        # chunk = np.ascontiguousarray(chunk)

        # if the channel number is 1, squeeze it to a 3d array
        # this should not be necessary
        # TODO: remove this step and use 4D arrays all over this package;
        # always using 4D arrays would simplify some operations
        # voxel_offset = Cartesian(s.start for s in chunk_slices)
        if chunk.shape[0] == 1:
            chunk = np.squeeze(chunk, axis=0)

        chunk = Chunk(chunk,
                      voxel_offset=output_bbox.start,
                      voxel_size=Cartesian.from_collection(
                          self.vol.resolution[::-1]))

        if self.blackout_sections:
            chunk = self._blackout_sections(chunk)

        if self.validate_mip:
            self._validate_chunk(chunk)

        return chunk
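The slice reversal and transpose above follow from CloudVolume indexing in x, y, z order while this package uses z, y, x; a minimal numpy-only sketch of the two steps, with hypothetical slice values:

import numpy as np

# hypothetical z, y, x slices for the expanded output bounding box
chunk_slices = (slice(2, 18), slice(32, 96), slice(40, 104))

# reverse to x, y, z before indexing a CloudVolume
assert chunk_slices[::-1] == (slice(40, 104), slice(32, 96), slice(2, 18))

# a Fortran-ordered x, y, z cutout transposes back to C-ordered z, y, x
cutout_xyz = np.zeros((64, 64, 16), dtype=np.uint8, order='F')
cutout_zyx = cutout_xyz.transpose()
assert cutout_zyx.shape == (16, 64, 64)
assert cutout_zyx.flags['C_CONTIGUOUS']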
Example #24
def execute(bbox: Bbox):
    print('bounding box: ', bbox)
    box = [tuple(bbox.minpt), tuple(bbox.maxpt)]
    subvol = dvid.fetch_labelmap_voxels(server,
                                        uuid,
                                        instance,
                                        box,
                                        scale=0,
                                        supervoxels=supervoxels)
    # print('cutout volume: \n', subvol)
    chunk = Chunk(subvol, voxel_offset=bbox.minpt)
    return [chunk]
Example #25
def test_segmentation():
    print('test downsample and upload...')
    # compute parameters
    mip = 0
    size = (16, 512, 512)

    # create image dataset using cloud-volume
    img = np.random.randint(np.iinfo(np.uint32).max,
                            size=size,
                            dtype=np.uint32)
    chunk = Chunk(img, global_offset=[2, 32, 32])
    hierarchical_downsample(chunk)
Example #26
    def _construct_output_chunk_mask(self, input_chunk):
        if not self.mask_output_chunk:
            return

        if self.verbose:
            print('creating output chunk mask...')

        if self.output_chunk_mask is None or not np.array_equal(
                input_chunk.shape, self.output_chunk_mask.shape):
            # TODO: clean up the extra temporary files if we created
            # multiple mmap files
            #output_mask_mmap_file = mktemp(suffix='.dat')
            ## the memory map is initialized with 0 in default
            #output_mask_array = np.memmap(output_mask_mmap_file,
            #                                   dtype=self.dtype, mode='w+',
            #                                   shape=self.output_size)
            output_mask_array = np.zeros(self.output_size, self.dtype)
        else:
            output_mask_array = self.output_chunk_mask.array
            output_mask_array.fill(0)

        output_global_offset = tuple(
            io + ocso
            for io, ocso in zip(input_chunk.global_offset, self.output_offset))

        self.output_chunk_mask = Chunk(output_mask_array,
                                       global_offset=output_global_offset)

        assert len(self.patch_slices_list) > 0
        for _, output_patch_slice in self.patch_slices_list:
            # accumulate weights using the patch mask in RAM
            patch_global_offset = tuple(s.start for s in output_patch_slice)
            patch_mask = Chunk(self.patch_inferencer.output_patch_mask_numpy,
                               global_offset=patch_global_offset)
            self.output_chunk_mask.blend(patch_mask)

        # normalize weight, so accumulated inference result multiplies
        # this mask will result in 1
        self.output_chunk_mask.array = 1.0 / self.output_chunk_mask.array
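The final reciprocal works because every voxel of the mask has accumulated the weights of all patches overlapping it, so multiplying the accumulated inference result by the mask renormalizes each voxel to unit weight. A small numeric sketch of that invariant, with hypothetical patch weights:

import numpy as np

# three voxels covered by one or two overlapping patch masks
accumulated_weight = np.array([0.3 + 0.7, 1.0, 0.5 + 0.5])
normalization = 1.0 / accumulated_weight

# the accumulated weight times the normalization mask is exactly 1
assert np.allclose(accumulated_weight * normalization, 1.0)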
Example #27
    def __call__(self, output_bbox):
        #gevent.monkey.patch_all(thread=False)
        vol = CloudVolume(self.volume_path,
                          bounded=False,
                          fill_missing=self.fill_missing,
                          progress=self.verbose,
                          mip=self.mip,
                          cache=False,
                          use_https=self.use_https,
                          green_threads=True)

        chunk_slices = tuple(
            slice(s.start - m, s.stop + m)
            for s, m in zip(output_bbox.to_slices(), self.expand_margin_size))

        if self.dry_run:
            input_bbox = Bbox.from_slices(chunk_slices)
            return Chunk.from_bbox(input_bbox)

        if self.verbose:
            print('cutout {} from {}'.format(chunk_slices[::-1],
                                             self.volume_path))

        # always reverse the indexes since cloudvolume uses x,y,z indexing
        chunk = vol[chunk_slices[::-1]]
        # the cutout is Fortran ordered, so we need to transpose it to C order
        chunk = chunk.transpose()
        # we could do this transpose later;
        # actually we do not need to make it contiguous
        # chunk = np.ascontiguousarray(chunk)

        # if the channel number is 1, squeeze it to a 3d array
        # this should not be necessary
        # TODO: remove this step and use 4D arrays all over this package;
        # always using 4D arrays would simplify some operations
        global_offset = tuple(s.start for s in chunk_slices)
        if chunk.shape[0] == 1:
            chunk = np.squeeze(chunk, axis=0)
        else:
            global_offset = (chunk.shape[0], ) + global_offset

        chunk = Chunk(chunk, global_offset=global_offset)

        if self.blackout_sections:
            chunk = self._blackout_sections(chunk)

        if self.validate_mip:
            self._validate_chunk(chunk, vol)

        return chunk
Example #28
def test_normalize_section_contrast():
    DIR = os.path.join(os.path.dirname(__file__), '../data/levels/1/')
    #histogram = np.random.randint(10000, size=256, dtype=np.uint32)
    image = np.arange(256, dtype=np.uint8).reshape(1, 16, 16)
    image = Chunk(image, voxel_offset=(26774, 234, 456))
    
    levels_path = 'precomputed://file://' + DIR
    operator = NormalizeSectionContrastOperator(levels_path)
    normalized = operator(image)
    
    normalized_filename = os.path.join(DIR, '../normalized.npz')
    #np.savez( normalized_filename , normalized=normalized)
    groundtruth = np.load(normalized_filename)['normalized']
    np.testing.assert_array_equal(normalized, groundtruth)
Example #29
def normalize_section_shang(image: Chunk, nominalmin: float,
                            nominalmax: float, clipvalues: bool):
    """
    Parameters
    ------------
    image:
        image volume.
    nominalmin:
        min threshold
    nominalmax:
        max threshold
    clipvalues:
        clip values or not.
    """
    assert nominalmin < nominalmax
    assert image.ndim == 3
    global_offset = image.global_offset
    originaltype = image.dtype
    arr = image.astype(np.float32)

    # number of bits per voxel
    nbits = np.dtype(originaltype).itemsize * 8
    default_nominalmax = float(2**nbits - 1)

    nominalmin = nominalmin if nominalmin is not None else 0.0
    nominalmax = nominalmax if nominalmax is not None else default_nominalmax

    normalization = 'fill'

    # stack/chunk-wise normalization first if necessary (for blank slices within a valid stack)
    #arr = normalize(arr, normalization, target_scale = [-1,1], min_max_invalid = [True]*2, make_copy=False)

    # slice-wise normalization
    # Note in chunkflow the first dim is z/slice
    for ii in range(arr.shape[0]):
        normalize(arr[ii, :, :],
                  normalization,
                  target_scale=[nominalmin, nominalmax],
                  min_max_invalid=[True, True],
                  do_clipping=clipvalues,
                  make_copy=False)

    # cast to original data type if necessary
    #arr = np.round(arr)
    #arr = arr.astype(originaltype)

    return Chunk(arr, global_offset=global_offset)
Example #30
    def __call__(self, chunk):
        # this is an image, not an affinity map
        assert chunk.ndim == 3
        # we have to use np.dtype function to make it match
        # https://stackoverflow.com/questions/26921836/correct-way-to-test-for-numpy-dtype
        assert chunk.dtype is np.dtype(np.uint8)

        for z in range(chunk.bbox.minpt[-3], chunk.bbox.maxpt[-3]):
            lookup_table = self.fetch_lookup_table(z)
            slices = (slice(z, z + 1), *chunk.slices[-2:])
            image = chunk.cutout(slices)
            image_global_offset = image.global_offset
            image = lookup_table[image]
            image = Chunk(image, global_offset=image_global_offset)
            chunk.save(image)

        return chunk