Example #1
import numpy as np

# assumed import paths for the chunkflow package
from chunkflow.chunk import Chunk
from chunkflow.flow.inferencer import Inferencer


def test_non_aligned_input_chunk():
    print('\ntest non-aligned block inference engine...')
    patch_size = (32, 256, 256)
    patch_overlap = (4, 64, 64)
    num_output_channels = 2
    input_size = (28 * 2 + 4 + 6, (256 - 64) * 2 + 64 + 7,
                  (256 - 64) * 2 + 64 + 9)
    image = np.random.randint(1, 255, size=input_size, dtype=np.uint8)
    image = Chunk(image)

    with Inferencer(None,
                    None,
                    patch_size,
                    output_patch_overlap=patch_overlap,
                    num_output_channels=num_output_channels,
                    batch_size=5,
                    framework='identity',
                    mask_output_chunk=True) as inferencer:
        output = inferencer(image)

    # only use the first channel to check correctness
    output = output[0, :, :, :]

    image = image.astype(np.float32) / 255
    residual = image - output
    print('maximum difference: ', np.max(residual.array))

    # some of the image voxels can be 0, so a pure relative tolerance would fail; atol is needed too
    np.testing.assert_allclose(image, output, rtol=1e-5, atol=1e-5)
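The input size in this test is easier to parse once the patch stride is made explicit; a small standalone sketch of the arithmetic (plain Python, no chunkflow required):

patch_size = (32, 256, 256)
patch_overlap = (4, 64, 64)
# stride between neighboring patches
stride = tuple(p - o for p, o in zip(patch_size, patch_overlap))   # (28, 192, 192)
# an input covering a 2x2x2 patch grid exactly would be stride * 2 + overlap
aligned = tuple(s * 2 + o for s, o in zip(stride, patch_overlap))  # (60, 448, 448)
# the extra (6, 7, 9) voxels make the chunk non-aligned, which is why
# mask_output_chunk=True is used in the test above
input_size = tuple(a + e for a, e in zip(aligned, (6, 7, 9)))
assert input_size == (28 * 2 + 4 + 6, (256 - 64) * 2 + 64 + 7, (256 - 64) * 2 + 64 + 9)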
Example #2
    def __call__(self, chunk: Chunk):
        # requires: os, numpy as np, pyspng, tqdm, and chunkflow's Chunk
        assert isinstance(chunk, Chunk)
        if chunk.is_affinity_map:
            # average two affinity channels into a single uint8 image
            properties = chunk.properties
            chunk = (chunk[1, ...] + chunk[2, ...]) / 2. * 255.
            chunk = chunk.astype(np.uint8)
            chunk = Chunk(chunk)
            chunk.set_properties(properties)

        assert chunk.ndim == 3
        # save each z section as a separate, zero-padded PNG file
        for z in tqdm(range(chunk.voxel_offset[0], chunk.bbox.maxpt[0])):
            img = chunk.cutout(
                (slice(z, z + 1), chunk.slices[1], chunk.slices[2]))
            img = img.array[0, :, :]
            filename = os.path.join(self.output_path, f"{z:05d}.png")
            with open(filename, "wb") as f:
                f.write(pyspng.encode(img))
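The per-section write at the end is self-contained enough to try on its own; a minimal sketch with a synthetic section (the /tmp path is just an example):

import os

import numpy as np
import pyspng

# encode one 2-D uint8 section as PNG bytes and write it to disk,
# mirroring the loop body above
img = np.random.randint(0, 255, size=(128, 128), dtype=np.uint8)
filename = os.path.join('/tmp', f"{42:05d}.png")
with open(filename, "wb") as f:
    f.write(pyspng.encode(img))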
Example #3
    def _append_segmentation_layer(self,
                                   viewer_state: ng.viewer_state.ViewerState,
                                   chunk_name: str, chunk: Chunk):
        if np.issubdtype(chunk.dtype, np.int64):
            # neuroglancer uses unsigned 64-bit segment IDs
            assert chunk.min() >= 0
            chunk = chunk.astype(np.uint64)
        voxel_size = self._get_voxel_size(chunk)
        dimensions = ng.CoordinateSpace(scales=voxel_size,
                                        units=['nm', 'nm', 'nm'],
                                        names=['z', 'y', 'x'])
        viewer_state.layers.append(
            name=chunk_name,
            layer=ng.LocalVolume(
                data=chunk,
                dimensions=dimensions,
                # offset is in nm, not voxels;
                # chunkflow uses C order with zyx,
                # while neuroglancer uses F order with xyz
                voxel_offset=chunk.voxel_offset,
            ))
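The dtype guard at the top reflects neuroglancer's use of uint64 segment IDs; in isolation it looks like this (a toy array stands in for a real segmentation):

import numpy as np

seg = np.array([[0, 1], [2, 3]], dtype=np.int64)
if np.issubdtype(seg.dtype, np.int64):
    # only safe when there are no negative labels
    assert seg.min() >= 0
    seg = seg.astype(np.uint64)
assert seg.dtype == np.uint64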
Example #4
import numpy as np

# assumed import paths for the chunkflow package
from chunkflow.chunk import Chunk
from chunkflow.flow.inferencer import Inferencer


def test_aligned_input_chunk_with_cropped_patch():
    print('\ntest block inference engine...')
    input_patch_size = (20, 256, 256)
    output_patch_size = (16, 192, 192)
    output_patch_crop_margin = (2, 32, 32)
    output_patch_overlap = (2, 32, 32)
    input_patch_overlap = (6, 96, 96)
    input_patch_stride = (14, 160, 160)
    input_size = (2 * 14 + 6, 2 * 160 + 96, 2 * 160 + 96)
    num_output_channels = 1

    image = np.random.randint(1, 255, size=input_size, dtype=np.uint8)

    # make sure that it works with arbitrary global offset
    image = Chunk(image, global_offset=(123, 345, 567))

    with Inferencer(None,
                    None,
                    input_patch_size,
                    output_patch_size=output_patch_size,
                    output_patch_overlap=output_patch_overlap,
                    patch_num=(2, 2, 2),
                    num_output_channels=num_output_channels,
                    framework='identity',
                    batch_size=5,
                    mask_output_chunk=False) as chunk_inferencer:
        output = chunk_inferencer(image)

    # only use the first channel to check correctness
    output = output[0, :, :, :]
    assert output.ndim == 3

    # we need to crop the patch overlap since the values were changed
    image = image[4:-4, 64:-64, 64:-64]
    image = image.astype(np.float32) / 255

    print('maximum difference: ', np.max(image - output))

    # some of the image voxels can be 0, so a pure relative tolerance would fail; atol is needed too
    np.testing.assert_allclose(image, output, rtol=1e-5, atol=1e-5)
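The parameters of this test are tied together by a few identities; a quick sketch verifying them (plain Python):

input_patch_size = (20, 256, 256)
output_patch_crop_margin = (2, 32, 32)
output_patch_overlap = (2, 32, 32)

# the output patch loses the crop margin on both sides of every axis
output_patch_size = tuple(i - 2 * m for i, m in zip(input_patch_size, output_patch_crop_margin))
# the input patches must overlap by the output overlap plus both margins
input_patch_overlap = tuple(o + 2 * m for o, m in zip(output_patch_overlap, output_patch_crop_margin))
input_patch_stride = tuple(i - v for i, v in zip(input_patch_size, input_patch_overlap))

assert output_patch_size == (16, 192, 192)
assert input_patch_overlap == (6, 96, 96)
assert input_patch_stride == (14, 160, 160)

# the final comparison crops margin + output overlap on each side,
# which is where the (4, 64, 64) in image[4:-4, 64:-64, 64:-64] comes from
crop = tuple(m + o for m, o in zip(output_patch_crop_margin, output_patch_overlap))
assert crop == (4, 64, 64)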
Example #5
import numpy as np

# assumed import paths for the chunkflow package
from chunkflow.chunk import Chunk
from chunkflow.flow.inferencer import Inferencer


def test_aligned_patch_num():
    print('\ntest block inference with patch number...')
    patch_size = (32, 256, 256)
    patch_overlap = (4, 64, 64)
    patch_num = (2, 2, 2)
    num_output_channels = 2
    dtype = 'float16'

    image = np.random.randint(1,
                              255,
                              size=(28 * 2 + 4, (256 - 64) * 2 + 64,
                                    (256 - 64) * 2 + 64),
                              dtype=np.uint8)
    image = Chunk(image)

    with Inferencer(None,
                    None,
                    patch_size,
                    output_patch_overlap=patch_overlap,
                    num_output_channels=num_output_channels,
                    patch_num=patch_num,
                    framework='identity',
                    dtype=dtype,
                    batch_size=5,
                    mask_output_chunk=False) as block_inference_engine:
        output = block_inference_engine(image)

    # only use the first channel to check correctness
    output = output[0, :, :, :]

    # we need to crop the patch overlap since the values were changed
    image = image[patch_overlap[0]:-patch_overlap[0],
                  patch_overlap[1]:-patch_overlap[1],
                  patch_overlap[2]:-patch_overlap[2]]

    image = image.astype(dtype) / 255
    print('maximum difference: ', np.max(image - output))

    # some of the image voxels can be 0, so a pure relative tolerance would fail; atol is needed too
    np.testing.assert_allclose(image, output, rtol=1e-3, atol=1e-3)
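The looser rtol/atol of 1e-3 here (versus 1e-5 in the float32 tests) matches float16 precision; machine epsilon makes the point:

import numpy as np

# float16 resolves only about 3 decimal digits, so a 1e-5 tolerance
# would be stricter than the dtype itself can represent
print(np.finfo(np.float16).eps)  # ~9.77e-04
print(np.finfo(np.float32).eps)  # ~1.19e-07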
Example #6
    def _append_probability_map_layer(
            self, viewer_state: ng.viewer_state.ViewerState, chunk_name: str,
            chunk: Chunk):
        # neuroglancer supports float32 but not float16,
        # so convert eagerly before building the layer
        if chunk.dtype in (np.dtype('<f4'), np.dtype('float16')):
            chunk = chunk.astype(np.float32)

        voxel_size = self._get_voxel_size(chunk)
        if chunk.shape[0] == 1:
            shader = """void main() {
emitGrayscale(toNormalized(getDataValue(0)));
}
"""
        elif chunk.shape[0] == 2:
            shader = """void main() {
emitRGB(vec3(toNormalized(getDataValue(0)),
            toNormalized(getDataValue(1)),
            0.));
}
"""
        else:
            shader = """void main() {
emitRGB(vec3(toNormalized(getDataValue(0)),
            toNormalized(getDataValue(1)),
            toNormalized(getDataValue(2))));
}
"""
        dimensions = ng.CoordinateSpace(scales=(1, ) + voxel_size,
                                        units=['', 'nm', 'nm', 'nm'],
                                        names=['c^', 'z', 'y', 'x'])
        viewer_state.layers.append(
            name=chunk_name,
            layer=ng.LocalVolume(
                data=chunk.array,
                dimensions=dimensions,
                # offset is in nm, not voxels
                voxel_offset=(0, *chunk.voxel_offset),
            ),
            shader=shader)
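The 4-D coordinate space built above can be reproduced on its own; in neuroglancer the '^' suffix marks a channel dimension, and the voxel size used here is a made-up example value:

import neuroglancer as ng

dimensions = ng.CoordinateSpace(
    scales=(1, 45, 16, 16),  # hypothetical 45 x 16 x 16 nm voxel size
    units=['', 'nm', 'nm', 'nm'],
    names=['c^', 'z', 'y', 'x'])
print(dimensions)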
Example #7
import numpy as np

# assumed import: Chunk from the chunkflow package; `Engine` is
# chunkflow's block inference engine class and its exact import
# path depends on the chunkflow version
from chunkflow.chunk import Chunk


def test_aligned_input_chunk():
    print('\ntest block inference engine...')
    patch_size = (32, 256, 256)
    patch_overlap = (4, 64, 64)
    num_output_channels = 2

    block_inference_engine = Engine(None,
                                    None,
                                    patch_size=patch_size,
                                    patch_overlap=patch_overlap,
                                    num_output_channels=num_output_channels,
                                    framework='identity',
                                    batch_size=5,
                                    mask_output_chunk=False)

    image = np.random.randint(1,
                              255,
                              size=(28 * 2 + 4, (256 - 64) * 2 + 64,
                                    (256 - 64) * 2 + 64),
                              dtype=np.uint8)
    image = Chunk(image)
    output = block_inference_engine(image)
    # only use the first channel to check correctness
    output = output[0, :, :, :]
    output = np.reshape(output, image.shape)

    # we need to crop the patch overlap since the values were changed
    image = image[patch_overlap[0]:-patch_overlap[0],
                  patch_overlap[1]:-patch_overlap[1],
                  patch_overlap[2]:-patch_overlap[2]]
    output = output[patch_overlap[0]:-patch_overlap[0],
                    patch_overlap[1]:-patch_overlap[1],
                    patch_overlap[2]:-patch_overlap[2]]

    image = image.astype(np.float32) / 255
    print('maximum difference: ', np.max(image - output))

    # some of the image voxels can be 0, so a pure relative tolerance would fail; atol is needed too
    np.testing.assert_allclose(image, output, rtol=1e-5, atol=1e-5)
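Cropping a border of width patch_overlap from both arrays before the comparison, as done above, can be written as a tiny hypothetical helper:

import numpy as np

def crop_border(arr: np.ndarray, overlap: tuple) -> np.ndarray:
    # drop overlap[i] voxels from both ends of every axis
    return arr[tuple(slice(o, -o) for o in overlap)]

a = np.zeros((60, 448, 448), dtype=np.float32)
assert crop_border(a, (4, 64, 64)).shape == (52, 320, 320)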
Example #8
    def __call__(self, input_chunk: Chunk):
        """
        Args:
            input_chunk (Chunk): input chunk with global offset
        """
        # requires: numpy as np, time, tqdm, and chunkflow's Chunk
        assert isinstance(input_chunk, Chunk)

        self._update_parameters_for_input_chunk(input_chunk)
        output_buffer = self._get_output_buffer(input_chunk)

        if not self.mask_output_chunk:
            self._check_alignment()

        if self.dry_run:
            print('dry run, return a special artificial chunk.')
            size = output_buffer.shape

            if self.mask_myelin_threshold:
                # eliminate the myelin channel
                size = (size[0] - 1, *size[1:])

            return Chunk.create(size=size,
                                dtype=output_buffer.dtype,
                                voxel_offset=output_buffer.global_offset)

        if np.all(input_chunk == 0):
            print('input is all zero, return zero buffer directly')
            if self.mask_myelin_threshold:
                assert output_buffer.shape[0] == 4
                return output_buffer[:-1, ...]
            else:
                return output_buffer

        if np.issubdtype(input_chunk.dtype, np.integer):
            # normalize to 0-1 value range
            dtype_max = np.iinfo(input_chunk.dtype).max
            input_chunk = input_chunk.astype(self.dtype) / dtype_max

        if self.verbose:
            chunk_time_start = time.time()

        # iterate over the patch slices in batches of batch_size
        for i in tqdm(range(0, len(self.patch_slices_list), self.batch_size),
                      disable=not self.verbose,
                      desc='ConvNet inference for patches: '):
            if self.verbose:
                start = time.time()

            batch_slices = self.patch_slices_list[i:i + self.batch_size]
            for batch_idx, slices in enumerate(batch_slices):
                self.input_patch_buffer[batch_idx,
                                        0, :, :, :] = input_chunk.cutout(
                                            slices[0]).array

            if self.verbose > 1:
                end = time.time()
                print('preparing %d input patches takes %.3f sec' %
                      (self.batch_size, end - start))
                start = end

            # the input and output patches are 5D numpy arrays with
            # dtype float32; the dimensions are batch/channel/z/y/x.
            # the input image should be normalized to [0, 1]
            output_patch = self.patch_inferencer(self.input_patch_buffer)

            if self.verbose > 1:
                assert output_patch.ndim == 5
                end = time.time()
                print('running inference for %d patches takes %.3f sec' %
                      (self.batch_size, end - start))
                start = end

            for batch_idx, slices in enumerate(batch_slices):
                # only use the required number of channels;
                # the remaining channels are dropped.
                # slices[0] is the input patch slice and
                # slices[1] is the output patch slice
                offset = (0, ) + tuple(s.start for s in slices[1])
                output_chunk = Chunk(output_patch[batch_idx, :, :, :, :],
                                     global_offset=offset)

                output_buffer.blend(output_chunk)

            if self.verbose > 1:
                end = time.time()
                print('blending the patches takes %.3f sec' % (end - start))

        if self.verbose:
            print("Inference of whole chunk takes %3f sec" %
                  (time.time() - chunk_time_start))

        if self.mask_output_chunk:
            output_buffer *= self.output_chunk_mask

        # theoretically, no value of output_buffer should be greater than 1;
        # we use a slightly higher bound here to accommodate numerical precision issues
        np.testing.assert_array_less(
            output_buffer,
            1.0001,
            err_msg='output buffer should not be greater than 1')

        if self.mask_myelin_threshold:
            # currently this is only used for masking out an affinity map
            assert output_buffer.shape[0] == 4
            output_chunk = output_buffer.mask_using_last_channel(
                threshold=self.mask_myelin_threshold)

            # currently neuroglancer only supports float32, not float16
            if output_chunk.dtype == np.dtype('float16'):
                output_chunk = output_chunk.astype('float32')

            return output_chunk
        else:
            return output_buffer
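The batching loop near the top of the method relies on a standard slicing pattern; a stripped-down sketch:

# step through work items in strides of batch_size; the final slice may
# be shorter, and the inner loop handles that without special-casing
work = list(range(7))
batch_size = 3
for i in range(0, len(work), batch_size):
    batch = work[i:i + batch_size]
    print(batch)  # [0, 1, 2] then [3, 4, 5] then [6]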