Code example #1
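Streams each z slice of a multi-channel prediction array as an in-memory PNG: every channel slice is tinted with its class color, the tinted channels are summed into an RGB float buffer, and the buffer is converted to uint8 and saved into a BytesIO.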
    def to_z_slice_pngs(self,
                        class_colors: Sequence[Color]) -> Iterator[io.BytesIO]:
        for z_slice in self.split(self.shape.updated(z=1)):
            print(f"\nz_slice: {z_slice}")
            rendered_rgb = Array5D.allocate(z_slice.shape.updated(c=3),
                                            dtype=np.dtype("float32"),
                                            value=0)
            rendered_rgb_yxc = rendered_rgb.raw("yxc")

            for prediction_channel, color in zip(
                    z_slice.split(z_slice.shape.updated(c=1)), class_colors):
                print(f"\nprediction_channel: {prediction_channel}")

                class_rgb = Array5D(np.ones(
                    prediction_channel.shape.updated(c=3).to_tuple("yxc")),
                                    axiskeys="yxc")
                class_rgb.raw("yxc")[...] *= np.asarray(
                    [color.r, color.g, color.b])
                class_rgb.raw("cyx")[...] *= prediction_channel.raw("yx")

                rendered_rgb_yxc += class_rgb.raw("yxc")

            out_image = PIL.Image.fromarray(
                rendered_rgb.raw("yxc").astype(np.uint8))  # type: ignore
            out_file = io.BytesIO()
            out_image.save(out_file, "png")
            _ = out_file.seek(0)
            yield out_file
Code example #2
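An Operator that segments by channel-wise argmax: for each class it emits a three-channel uint8 mask whose red channel is 255 wherever that class won.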
class SimpleSegmenter(Operator[DataRoi, List[Array5D]]):
    def __init__(
        self,
        *,
        preprocessor: Operator[DataRoi, Array5D] = OpRetriever(),
    ) -> None:
        super().__init__()
        self.preprocessor = preprocessor

    def __call__(self, /, roi: DataRoi) -> List[Array5D]:
        data = self.preprocessor(roi)
        winning_channel_indices = Array5D(
            arr=np.argmax(data.raw(data.axiskeys), axis=data.axiskeys.index("c")),
            axiskeys=data.axiskeys.replace("c", ""),
            location=roi.start,
        )

        segmentations: List[Array5D] = []

        for class_index in range(data.shape.c):
            class_seg = Array5D.allocate(data.interval.updated(c=(0, 3)), dtype=np.dtype("uint8"), value=0)
            red_channel = class_seg.cut(c=0)
            raw_segmentation = (winning_channel_indices.raw("tzyx") == class_index).astype(np.dtype("uint8")) * 255
            red_channel.raw("tzyx")[...] = raw_segmentation
            segmentations.append(class_seg)

        return segmentations
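The core of the operator is a channel-axis argmax followed by per-class thresholding. A minimal numpy-only sketch of the same idea, with made-up values:

import numpy as np

# Two prediction channels over a 2x2 image ("cyx" order); values invented for illustration
data_cyx = np.asarray([
    [[0.1, 0.9],
     [0.5, 0.2]],
    [[0.8, 0.1],
     [0.4, 0.7]],
])
winners = np.argmax(data_cyx, axis=0)  # winning channel index per pixel
masks = [(winners == i).astype(np.uint8) * 255 for i in range(data_cyx.shape[0])]
assert masks[0][0, 1] == 255  # channel 0 wins at y=0, x=1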
Code example #3
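Unit test for SimpleSegmenter: a two-channel 4x4 input yields two three-channel masks matching the channel-wise argmax (ties go to the lower channel index).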
def test_simple_segmenter():
    segmenter = SimpleSegmenter()

    input_data = ArrayDataSource(
        data=Array5D(np.asarray([
           [[ 0.1,  0.1,   0.0,  1.0],
            [ 0.2,  0.2,   0.0,  0.0],
            [ 0.3,  0.3,   0.1,  0.2],
            [ 0.4,  0.4,   0.0,  0.0]],

           [[ 0.4,  0.4,   0.0,  0.0],
            [ 0.3,  0.3,   0.0,  0.0],
            [ 0.2,  0.2,   0.4,  0.0],
            [ 0.1,  0.1,   0.0,  0.0]]
        ]), axiskeys="cyx"),
    )

    expected_segmentation = [
        Array5D(np.asarray([
           [[  0,   0, 255, 255],
            [  0,   0, 255, 255],
            [255, 255,   0, 255],
            [255, 255, 255, 255]],

           [[  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0]],

           [[  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0]],
        ]), axiskeys="cyx"),

        Array5D(np.asarray([
           [[255, 255,   0,   0],
            [255, 255,   0,   0],
            [  0,   0, 255,   0],
            [  0,   0,   0,   0]],

           [[  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0]],

           [[  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0],
            [  0,   0,   0,   0]],
        ]), axiskeys="cyx")
    ]

    segmentations = segmenter(input_data.roi)
    for seg, expected_seg in zip(segmentations, expected_segmentation):
        assert np.all(seg.raw("cyx") == expected_seg.raw("cyx"))
Code example #4
File: test_datasource.py Project: ilastik/webilastik
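Checks that an N5DataSource can retrieve an ROI smaller than a stored chunk: a single channel is read out of a dataset whose chunks span both channels.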
def test_retrieve_roi_smaller_than_tile():
    # fmt: off
    data = Array5D(np.asarray([
        [[   1,    2,    3,    4,     5],
         [   6,    7,    8,    9,    10],
         [  11,   12,   13,   14,    15],
         [  16,   17,   18,   19,    20]],

        [[ 100,  200,  300,  400,   500],
         [ 600,  700,  800,  900,  1000],
         [1100, 1200, 1300, 1400,  1500],
         [1600, 1700, 1800, 1900,  2000]],
    ]).astype(np.uint32), axiskeys="cyx")

    expected_cyx = np.asarray([
        [[ 100,  200,  300,  400],
         [ 600,  700,  800,  900],
         [1100, 1200, 1300, 1400],
         [1600, 1700, 1800, 1900]]
    ])
    # fmt: on
    path = PurePosixPath(create_n5(data, chunk_size=Shape5D(c=2, y=4, x=4)))
    ds = N5DataSource(path=path / "data", filesystem=OsFs("/"))
    smaller_than_tile = ds.retrieve(c=1, y=(0, 4), x=(0, 4))
    assert np.all(smaller_than_tile.raw("cyx") == expected_cyx)
Code example #5
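Constructor of an image-file datasource: the file is read with skimage.io.imread, wrapped as an Array5D, and the default tile shape is a 256-hypercube clamped to the data shape.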
    def __init__(
        self,
        *,
        path: PurePosixPath,
        location: Point5D = Point5D.zero(),
        filesystem: JsonableFilesystem,
        tile_shape: Optional[Shape5D] = None,
        spatial_resolution: Optional[Tuple[int, int, int]] = None,
    ):
        raw_data: "np.ndarray[Any, Any]" = skimage.io.imread(filesystem.openbin(path.as_posix())) # type: ignore
        c_axiskeys_on_disk = "yxc"[: len(raw_data.shape)]
        self._data = Array5D(raw_data, axiskeys=c_axiskeys_on_disk, location=location)

        if tile_shape is None:
            tile_shape = Shape5D.hypercube(256).to_interval5d().clamped(self._data.shape).shape

        super().__init__(
            c_axiskeys_on_disk=c_axiskeys_on_disk,
            filesystem=filesystem,
            path=path,
            dtype=self._data.dtype,
            interval=self._data.interval,
            tile_shape=tile_shape,
            spatial_resolution=spatial_resolution,
        )
Code example #6
File: annotation.py Project: ilastik/webilastik
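Builds a boolean scribbling mask from a sequence of points by interpolating between consecutive points; annotations spanning more than one channel are rejected.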
    def interpolate_from_points(cls, voxels: Sequence[Point5D],
                                raw_data: DataSource):
        start = Point5D.min_coords(voxels)
        stop = Point5D.max_coords(
            voxels
        ) + 1  # +1 because slice.stop is exclusive, but max_point is inclusive
        scribbling_roi = Interval5D.create_from_start_stop(start=start,
                                                           stop=stop)
        if scribbling_roi.shape.c != 1:
            raise ValueError(
                f"Annotations must not span multiple channels: {voxels}")
        scribblings = Array5D.allocate(scribbling_roi,
                                       dtype=np.dtype(bool),
                                       value=False)

        anchor = voxels[0]
        for voxel in voxels:
            for interp_voxel in anchor.interpolate_until(voxel):
                scribblings.paint_point(point=interp_voxel, value=True)
            anchor = voxel

        return cls(scribblings._data,
                   axiskeys=scribblings.axiskeys,
                   raw_data=raw_data,
                   location=start)
Code example #7
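Pixel-classification prediction over an ROI: features are flattened to (pixels, channels), per-forest partial predictions are computed on an executor and accumulated, and the sum is divided by the number of trees.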
    def _do_predict(self, roi: DataRoi) -> Predictions:
        feature_data = self.feature_extractor(roi)
        linear_feature_data = feature_data.raw("tzyxc").reshape(
            (feature_data.shape.t * feature_data.shape.volume,
             feature_data.shape.c))

        predictions = Array5D.allocate(
            axiskeys="tzyxc",
            interval=self.get_expected_roi(roi),
            dtype=np.dtype('float32'),
            value=0,
        )

        assert predictions.interval == self.get_expected_roi(roi)
        raw_linear_predictions: "ndarray[Any, dtype[float32]]" = predictions.raw(
            "tzyxc").reshape((predictions.shape.t * predictions.shape.volume,
                              predictions.shape.c))

        executor = get_executor(hint="predicting")
        f = partial(_compute_partial_predictions, linear_feature_data)
        futures = [executor.submit(f, forest) for forest in self.forests]
        for partial_predictions_future in futures:
            raw_linear_predictions += partial_predictions_future.result()

        raw_linear_predictions /= self.num_trees
        predictions.setflags(write=False)

        return Predictions(
            arr=predictions.raw(predictions.axiskeys),
            axiskeys=predictions.axiskeys,
            location=predictions.location,
        )
Code example #8
File: test_datasource.py Project: ilastik/webilastik
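Tests H5DataSource on 2D and 3D data: shape and tile shape are checked, and retrieved ROIs are compared against direct cuts of the source arrays.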
def test_h5_datasource():
    data_2d = Array5D(np.arange(100).reshape(10, 10), axiskeys="yx")
    h5_path = create_h5(data_2d, axiskeys_style="vigra", chunk_shape=Shape5D(x=3, y=3))
    ds = H5DataSource(outer_path=h5_path, inner_path=PurePosixPath("/data"), filesystem=OsFs("/"))
    assert ds.shape == data_2d.shape
    assert ds.tile_shape == Shape5D(x=3, y=3)

    slc = ds.interval.updated(x=(0, 3), y=(0, 2))
    assert (ds.retrieve(slc).raw("yx") == data_2d.cut(slc).raw("yx")).all()

    data_3d = Array5D(np.arange(10 * 10 * 10).reshape(10, 10, 10), axiskeys="zyx")
    h5_path = create_h5(data_3d, axiskeys_style="vigra", chunk_shape=Shape5D(x=3, y=3))
    ds = H5DataSource(outer_path=h5_path, inner_path=PurePosixPath("/data"), filesystem=OsFs("/"))
    assert ds.shape == data_3d.shape
    assert ds.tile_shape == Shape5D(x=3, y=3)

    slc = ds.interval.updated(x=(0, 3), y=(0, 2), z=3)
    assert (ds.retrieve(slc).raw("yxz") == data_3d.cut(slc).raw("yxz")).all()
Code example #9
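Decoder for raw precomputed chunks: the buffer is little-endian, Fortran-ordered xyzc, so it is reshaped accordingly and placed at the ROI start.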
    def decode(
        self,
        *,
        roi: Interval5D,
        dtype: "np.dtype[Any]",  # FIXME
        raw_chunk: bytes,
    ) -> Array5D:
        # "The (...) data (...) chunk is stored directly in little-endian binary format in [x, y, z, channel] Fortran order"
        raw_tile: np.ndarray[Any, Any] = np.frombuffer(
            raw_chunk, dtype=dtype.newbyteorder("<")
        ).reshape(roi.shape.to_tuple("xyzc"), order="F")
        tile_5d = Array5D(raw_tile, axiskeys="xyzc", location=roi.start)
        return tile_5d
Code example #10
File: test_datasource.py Project: ilastik/webilastik
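Tests N5DataSource retrieval across chunk boundaries, and that a datasource still works after a pickle round trip.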
def test_n5_datasource():
    # fmt: off
    data = Array5D(np.asarray([
        [1,  2,  3,  4,  5 ],
        [6,  7,  8,  9,  10],
        [11, 12, 13, 14, 15],
        [16, 17, 18, 19, 20]
    ]).astype(np.uint8), axiskeys="yx")
    # fmt: on

    path = PurePosixPath(create_n5(data, chunk_size=Shape5D(x=2, y=2)))
    ds = N5DataSource(path=path / "data", filesystem=OsFs("/"))
    assert ds.shape == data.shape

    # fmt: off
    expected_raw_piece = Array5D(np.asarray([
        [1, 2, 3],
        [6, 7, 8]
    ]).astype(np.uint8), axiskeys="yx")
    # fmt: on
    assert ds.retrieve(x=(0, 3), y=(0, 2)) == expected_raw_piece

    ds2 = pickle.loads(pickle.dumps(ds))
    assert ds2.retrieve(x=(0, 3), y=(0, 2)) == expected_raw_piece
Code example #11
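A composite feature extractor: every child extractor runs on the ROI via an executor (IlpGaussianSmoothing instances are submitted first), and the results are concatenated along the channel axis in extractor order.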
class FeatureExtractorCollection(FeatureExtractor):
    def __init__(self, extractors: Iterable[FeatureExtractor]):
        self.extractors = tuple(extractors)
        assert len(self.extractors) > 0
        super().__init__()

    def is_applicable_to(self, datasource: DataSource) -> bool:
        return all(fx.is_applicable_to(datasource) for fx in self.extractors)

    def __call__(self, /, roi: DataRoi) -> FeatureData:
        assert roi.interval.c[0] == 0
        feature_promises: Dict[int, Future[FeatureData]] = {}

        executor = get_executor(hint="feature_extraction",
                                max_workers=len(self.extractors))
        from webilastik.features.ilp_filter import IlpGaussianSmoothing

        feature_promises = {
            fx_index: executor.submit(fx, roi)
            for fx_index, fx in enumerate(self.extractors)
            if isinstance(fx, IlpGaussianSmoothing)
        }
        feature_promises.update({
            fx_index: executor.submit(fx, roi)
            for fx_index, fx in enumerate(self.extractors)
            if not isinstance(fx, IlpGaussianSmoothing)
        })
        assert len(feature_promises) == len(self.extractors)
        features = [
            feature_promises[i].result() for i in range(len(self.extractors))
        ]

        out = Array5D.allocate(
            dtype=np.dtype("float32"),
            interval=roi.shape.updated(c=sum(feat.shape.c
                                             for feat in features)),
            axiskeys="tzyxc",
        ).translated(roi.start)

        channel_offset: int = 0
        for feature in features:
            out.set(feature.translated(Point5D.zero(c=channel_offset)))
            channel_offset += feature.shape.c

        return FeatureData(arr=out.raw(out.axiskeys),
                           axiskeys=out.axiskeys,
                           location=out.location)
Code example #12
File: test_datasource.py Project: ilastik/webilastik
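Test helper that writes an Array5D to a temporary HDF5 file, labeling axes either through dimension labels ("dims") or vigra-style axistags ("vigra").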
def create_h5(array: Array5D, axiskeys_style: str, chunk_shape: Optional[Shape5D] = None, axiskeys: str = "xyztc"):
    raw_chunk_shape = (chunk_shape or Shape5D() * 2).clamped(maximum=array.shape).to_tuple(axiskeys)

    path = tempfile.mkstemp()[1] + ".h5"
    f = h5py.File(path, "w")
    ds = f.create_dataset("data", chunks=raw_chunk_shape, data=array.raw(axiskeys))
    if axiskeys_style == "dims":
        for key, dim in zip(axiskeys, ds.dims):
            dim.label = key
    elif axiskeys_style == "vigra":
        type_flags = {"x": 2, "y": 2, "z": 2, "t": 2, "c": 1}
        axistags = [{"key": key, "typeflags": type_flags[key], "resolution": 0, "description": ""} for key in axiskeys]
        ds.attrs["axistags"] = json.dumps({"axes": axistags})
    else:
        raise Exception(f"Bad axiskeys_style: {axiskeys_style}")

    return PurePosixPath(path)
Code example #13
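Decoder for JPEG-compressed precomputed chunks: the decompressed rows correspond to the flattened zyx (C-order) representation of the subvolume.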
    def decode(
        self,
        *,
        roi: Interval5D,
        dtype: np.dtype, #type: ignore
        raw_chunk: bytes
    ) -> Array5D:
        # "The width and height of the JPEG image may be arbitrary (...)"
        # "the total number of pixels is equal to the product of the x, y, and z dimensions of the subvolume"
        # "(...) the 1-D array obtained by concatenating the horizontal rows of the image corresponds to the
        # flattened [x, y, z] Fortran-order (i.e. zyx C order) representation of the subvolume."

        # FIXME: check if this works with any sort of funny JPEG shapes
        # FIXME: Also, what to do if dtype is weird?
        raw_jpg: np.ndarray = skimage.io.imread(io.BytesIO(raw_chunk)) # type: ignore
        tile_5d = Array5D(raw_jpg.reshape(roi.shape.to_tuple("zyxc")), axiskeys="zyxc")
        return tile_5d
Code example #14
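Applies a filter to a haloed ROI: the source is split into per-timepoint/per-channel slices (per-plane when axis_2d is set), filter_fn runs on each slice, and the responses are assembled into an output whose channel count is scaled by channel_multiplier.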
    def __call__(self, roi: DataRoi) -> FeatureData:
        haloed_roi = roi.enlarged(self.halo)
        source_data = self.preprocessor(haloed_roi)

        step_shape: Shape5D = Shape5D(
            c=1,
            t=1,
            x=1 if self.axis_2d == "x" else source_data.shape.x,
            y=1 if self.axis_2d == "y" else source_data.shape.y,
            z=1 if self.axis_2d == "z" else source_data.shape.z,
        )

        out = Array5D.allocate(
            interval=roi.updated(c=(roi.c[0] * self.channel_multiplier,
                                    roi.c[1] * self.channel_multiplier)),
            dtype=numpy.dtype("float32"),
            axiskeys=source_data.axiskeys.replace("c", "") + "c",  # fastfilters puts channel last
        )

        for data_slice in source_data.split(step_shape):
            source_axes = "zyx"
            if self.axis_2d:
                source_axes = source_axes.replace(self.axis_2d, "")

            raw_data: "ndarray[Any, dtype[float32]]" = data_slice.raw(source_axes).astype(numpy.float32)
            raw_feature_data: "ndarray[Any, dtype[float32]]" = self.filter_fn(raw_data)

            feature_data = FeatureData(
                raw_feature_data,
                axiskeys=(source_axes + "c") if len(raw_feature_data.shape) > len(source_axes) else source_axes,
                location=data_slice.location.updated(c=data_slice.location.c * self.channel_multiplier),
            )
            out.set(feature_data, autocrop=True)
        out.setflags(write=False)
        return FeatureData(
            out.raw(out.axiskeys),
            axiskeys=out.axiskeys,
            location=out.location,
        )
Code example #15
File: test_datasource.py Project: ilastik/webilastik
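Checks that DataRoi.get_datasource_tiles(clamp_to_datasource=True) yields exactly the datasource-aligned tiles that cover the requested slice, clamped to the data bounds.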
def test_data_roi_get_tiles_can_clamp_to_datasource_tiles():
    # fmt: off
    data = Array5D(np.asarray([
        [1,  2,  3,  4,  5],
        [6,  7,  8,  9,  10],
        [11, 12, 13, 14, 15],
        [16, 17, 18, 19, 20],
    ]).astype(np.uint8), axiskeys="yx")
    # fmt: on

    ds = ArrayDataSource(data=data, tile_shape=Shape5D(x=2, y=2))
    data_slice = DataRoi(datasource=ds, x=(1, 4), y=(0, 3))

    # fmt: off
    dataslice_expected_data = Array5D(np.asarray([
        [2,  3,  4],
        [7,  8,  9],
        [12, 13, 14]
    ]).astype(np.uint8), axiskeys="yx", location=Point5D.zero(x=1))
    # fmt: on

    assert data_slice.retrieve() == dataslice_expected_data

    # fmt: off
    dataslice_expected_slices = [
        Array5D(np.asarray([
            [1, 2],
            [6, 7]
        ]).astype(np.uint8), axiskeys="yx", location=Point5D.zero()),

        Array5D(np.asarray([
            [3,  4],
            [8,  9],
        ]).astype(np.uint8), axiskeys="yx", location=Point5D.zero(x=2)),

        Array5D(np.asarray([
            [11, 12],
            [16, 17],
        ]).astype(np.uint8), axiskeys="yx", location=Point5D.zero(y=2)),

        Array5D(np.asarray([
            [13, 14],
            [18, 19],
        ]).astype(np.uint8), axiskeys="yx", location=Point5D.zero(x=2, y=2))
    ]
    # fmt: on
    expected_slice_dict = {a.interval: a for a in dataslice_expected_slices}
    for piece in data_slice.get_datasource_tiles(clamp_to_datasource=True):
        expected_data = expected_slice_dict.pop(piece.interval)
        assert expected_data == piece.retrieve()
    assert len(expected_slice_dict) == 0
Code example #16
File: test_datasource.py Project: ilastik/webilastik
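Test helper that writes an Array5D to a temporary N5 dataset chunk by chunk and returns the outer path.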
def create_n5(
    array: Array5D, *, axiskeys: Optional[str] = None, chunk_size: Shape5D, compression: N5Compressor = RawCompressor()
):
    path = PurePosixPath(tempfile.mkstemp()[1] + ".n5")
    sink = N5DatasetSink(
        outer_path=path,
        inner_path=PurePosixPath("/data"),
        filesystem=OsFs("/"),
        attributes=N5DatasetAttributes(
            dimensions=array.shape,
            blockSize=chunk_size,
            c_axiskeys=axiskeys or array.axiskeys,
            dataType=array.dtype,
            compression=compression,
        )
    )
    sink_writer = sink.create()
    assert not isinstance(sink_writer, Exception)

    for tile in array.split(chunk_size):
        sink_writer.write(tile)
    return path.as_posix()
Code example #17
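Tile writer of an N5 sink: the tile is converted to an N5Block, serialized with the configured compression, and written to its chunk path.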
    def _process_tile(self, tile: Array5D) -> None:
        tile = N5Block.fromArray5D(tile)
        tile_path = self.get_tile_dataset_path(global_roi=tile.roi)
        with self.filesystem.openbin(tile_path, "w") as f:
            f.write(tile.to_n5_bytes(axiskeys=self.axiskeys, compression_type=self.compression_type))
Code example #18
File: __init__.py Project: ilastik/webilastik
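A small allocation helper: fills an interval with a constant value using the owner's dtype and a preferred axis order.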
    def _allocate(self, interval: Union[Shape5D, Interval5D], fill_value: int, axiskeys_hint: str = "tzyxc") -> Array5D:
        return Array5D.allocate(interval, dtype=self.dtype, value=fill_value, axiskeys=axiskeys_hint)
Code example #19
File: annotation.py Project: ilastik/webilastik
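Scales the annotation's boolean mask by a uint8 label value, preserving axiskeys and location.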
    def colored(self, value: np.uint8) -> Array5D:
        return Array5D(self._data * value,
                       axiskeys=self.axiskeys,
                       location=self.location)
Code example #20
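Tile reader backed by an h5py dataset: the tile interval is translated into local slices and the result is wrapped as an Array5D at the tile start.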
    def _get_tile(self, tile: Interval5D) -> Array5D:
        slices = tile.translated(-self.location).to_slices(self.axiskeys)
        raw = self._dataset[slices]
        if not isinstance(raw, ndarray):
            raise IOError(f"Expected ndarray at {slices}, found {raw}")
        return Array5D(raw, axiskeys=self.axiskeys, location=tile.start)
Code example #21
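Module-level fixtures for the datasink tests, followed by a test that serializes N5DatasetAttributes to JSON and parses it back.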
from pathlib import PurePosixPath

import numpy as np
from ndstructs.point5D import Point5D, Shape5D
from ndstructs.array5D import Array5D

from webilastik.datasink.n5_dataset_sink import N5DatasetSink
from webilastik.datasink.precomputed_chunks_sink import PrecomputedChunksScaleSink
from webilastik.datasource import DataRoi
from webilastik.datasource.array_datasource import ArrayDataSource
from webilastik.datasource.n5_datasource import N5DataSource
from webilastik.datasource.precomputed_chunks_info import PrecomputedChunksScale, RawEncoder
from webilastik.datasource.n5_attributes import GzipCompressor, N5DatasetAttributes, RawCompressor
from webilastik.datasource.precomputed_chunks_datasource import PrecomputedChunksDataSource

data = Array5D(np.arange(20 * 10 * 7).reshape(20, 10, 7), axiskeys="xyz")
data.setflags(write=False)

datasource = ArrayDataSource(data=data, tile_shape=Shape5D(x=10, y=10))


def test_n5_attributes():
    attributes = N5DatasetAttributes(
        dimensions=Shape5D(x=100, y=200),
        blockSize=Shape5D(x=10, y=20),
        c_axiskeys="yx",
        dataType=np.dtype("uint16").newbyteorder(">"),
        compression=GzipCompressor(level=3))

    reserialized_attributes = N5DatasetAttributes.from_json_data(
        attributes.to_json_data())
Code example #22
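Test helper that builds a random 5x5 Array5D from a seed.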
    def make_some_random_array(seed: int) -> Array5D:
        rng = np.random.default_rng(seed)  # use the seed so repeated calls are reproducible
        return Array5D(rng.random((5, 5)), axiskeys="yx")
Code example #23
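Parses the pixel-classification group of an .ilp project file: label colors and names, per-lane annotation blocks (reconstructed from their blockSlice and axistags attributes), and, when present, the pickled vigra random forests and feature names.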
    def parse(cls, group: h5py.Group, raw_data_sources: Mapping[int, "FsDataSource | None"]) -> "IlpPixelClassificationGroup":
        LabelColors = ensure_color_list(group, "LabelColors")
        LabelNames = ensure_encoded_string_list(group, "LabelNames")
        class_to_color: Mapping[np.uint8, Color] = {np.uint8(i): color for i, color in enumerate(LabelColors, start=1)}

        label_classes: Dict[Color, Label] = {color: Label(name=name, color=color, annotations=[]) for name, color in zip(LabelNames, LabelColors)}
        LabelSets = ensure_group(group, "LabelSets")
        for lane_key in LabelSets.keys():
            if not lane_key.startswith("labels"):
                continue
            lane_index = int(lane_key.replace("labels", ""))
            lane_label_blocks = ensure_group(LabelSets, lane_key)
            if len(lane_label_blocks.keys()) == 0:
                continue
            raw_data = raw_data_sources.get(lane_index)
            if raw_data is None:
                raise IlpParsingError(f"No datasource for lane {lane_index:03d}")
            for block_name in lane_label_blocks.keys():
                if not block_name.startswith("block"):
                    continue
                block = ensure_dataset(lane_label_blocks, block_name)
                block_data = block[()]
                if not isinstance(block_data, np.ndarray):
                    raise IlpParsingError("Expected annotation block to contain a ndarray")

                raw_axistags = block.attrs.get("axistags")
                if not isinstance(raw_axistags, str):
                    raise IlpParsingError(f"Expected axistags to be a str, found {raw_axistags}")
                axistags = AxisTags.fromJSON(raw_axistags)
                axiskeys = "".join(axistags.keys())

                if "blockSlice" not in block.attrs:
                    raise IlpParsingError(f"Expected 'blockSlice' in attrs from {block.name}")
                blockSlice = block.attrs["blockSlice"]
                if not isinstance(blockSlice, str):
                    raise IlpParsingError(f"Expected 'blockSlice'' to be a str, found {blockSlice}")
                # import pydevd; pydevd.settrace()
                blockSpans: Sequence[List[str]] = [span_str.split(":") for span_str in blockSlice[1:-1].split(",")]
                blockInterval = Interval5D.zero(**{
                    key: (int(span[0]), int(span[1]))
                    for key, span in zip(axiskeys, blockSpans)
                })

                block_5d = Array5D(block_data, axiskeys=axiskeys)
                for color_5d in block_5d.unique_colors().split(shape=Shape5D(x=1, c=block_5d.shape.c)):
                    color_index = np.uint8(color_5d.raw("c")[0])
                    if color_index == np.uint8(0): # background
                        continue
                    color = class_to_color.get(color_index)
                    if color is None:
                        raise IlpParsingError(f"Could not find a label color for index {color_index}")
                    annotation_data: "np.ndarray[Any, np.dtype[np.uint8]]" = block_5d.color_filtered(color=color_5d).raw(axiskeys)
                    annotation = Annotation(
                        annotation_data.astype(np.dtype(bool)),
                        location=blockInterval.start,
                        axiskeys=axiskeys, # FIXME: what if the user changed the axiskeys in the data source?
                        raw_data=raw_data,
                    )

                    label_classes[color].annotations.append(annotation)

        ClassifierFactory = ensure_bytes(group, "ClassifierFactory")
        if ClassifierFactory != VIGRA_ILP_CLASSIFIER_FACTORY:
            raise IlpParsingError(f"Expecting ClassifierFactory to be pickled ParallelVigraRfLazyflowClassifierFactory, found {ClassifierFactory}")
        if "ClassifierForests" in group:
            ClassifierForests = ensure_group(group, "ClassifierForests")
            forests: List[VigraRandomForest] = []
            for forest_key in sorted(ClassifierForests.keys()):
                if not forest_key.startswith("Forest"):
                    continue
                forest = VigraRandomForest(group.file.filename, f"{ClassifierForests.name}/{forest_key}")
                # forest_bytes = ensure_bytes(ClassifierForests, forest_key)
                # forest = h5_bytes_to_vigra_forest(h5_bytes=VigraForestH5Bytes(forest_bytes))
                forests.append(forest)

            feature_names = ensure_encoded_string_list(ClassifierForests, "feature_names")
            feature_extractors, expected_num_channels = cls.ilp_filters_and_expected_num_channels_from_names(feature_names)

            classifier = VigraPixelClassifier(
                feature_extractors=feature_extractors,
                forest_h5_bytes=[vigra_forest_to_h5_bytes(forest) for forest in forests],
                num_classes=len([label for label in label_classes.values() if not label.is_empty()]),
                num_input_channels=expected_num_channels,
            )
        else:
            classifier = None

        return IlpPixelClassificationGroup(
            classifier=classifier,
            labels=list(label_classes.values()),
        )
Code example #24
File: test_datasource.py Project: ilastik/webilastik
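Stacks three HDF5 datasources along z with SequenceDataSource and verifies a retrieved sub-ROI against the expected zcyx block (retrieval is exercised twice).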
def test_sequence_datasource():
    # fmt: off
    img1_data = Array5D(np.asarray([
       [[100, 101, 102, 103, 104],
        [105, 106, 107, 108, 109],
        [110, 111, 112, 113, 114],
        [115, 116, 117, 118, 119]],

       [[120, 121, 122, 123, 124],
        [125, 126, 127, 128, 129],
        [130, 131, 132, 133, 134],
        [135, 136, 137, 138, 139]],

       [[140, 141, 142, 143, 144],
        [145, 146, 147, 148, 149],
        [150, 151, 152, 153, 154],
        [155, 156, 157, 158, 159]]
    ]), axiskeys="cyx")

    img2_data = Array5D(np.asarray([
       [[200, 201, 202, 203, 204],
        [205, 206, 207, 208, 209],
        [210, 211, 212, 213, 214],
        [215, 216, 217, 218, 219]],

       [[220, 221, 222, 223, 224],
        [225, 226, 227, 228, 229],
        [230, 231, 232, 233, 234],
        [235, 236, 237, 238, 239]],

       [[240, 241, 242, 243, 244],
        [245, 246, 247, 248, 249],
        [250, 251, 252, 253, 254],
        [255, 256, 257, 258, 259]]
    ]), axiskeys="cyx")

    img3_data = Array5D(np.asarray([
       [[300, 301, 302, 303, 304],
        [305, 306, 307, 308, 309],
        [310, 311, 312, 313, 314],
        [315, 316, 317, 318, 319]],

       [[320, 321, 322, 323, 324],
        [325, 326, 327, 328, 329],
        [330, 331, 332, 333, 334],
        [335, 336, 337, 338, 339]],

       [[340, 341, 342, 343, 344],
        [345, 346, 347, 348, 349],
        [350, 351, 352, 353, 354],
        [355, 356, 357, 358, 359]]
    ]), axiskeys="cyx")

    expected_x_2_4__y_1_3 = Array5D(np.asarray([
      [[[107, 108],
        [112, 113]],

       [[127, 128],
        [132, 133]],

       [[147, 148],
        [152, 153]]],


      [[[207, 208],
        [212, 213]],

       [[227, 228],
        [232, 233]],

       [[247, 248],
        [252, 253]]],


      [[[307, 308],
        [312, 313]],

       [[327, 328],
        [332, 333]],

       [[347, 348],
        [352, 353]]],
    ]), axiskeys="zcyx")
    # fmt: on
    slice_x_2_4__y_1_3 = {"x": (2, 4), "y": (1, 3)}

    h5_outer_paths = [
        # create_n5(img1_data, axiskeys="cyx"),
        create_h5(img1_data, axiskeys_style="dims", axiskeys="cyx"),
        # create_n5(img2_data, axiskeys="cyx"),
        create_h5(img2_data, axiskeys_style="dims", axiskeys="cyx"),
        # create_n5(img3_data, axiskeys="cyx"),
        create_h5(img3_data, axiskeys_style="dims", axiskeys="cyx"),
    ]

    def stack_h5s(stack_axis: str) -> List[H5DataSource]:
        offset = Point5D.zero()
        stack: List[H5DataSource] = []
        for outer_path in h5_outer_paths:
            stack.append(H5DataSource(outer_path=outer_path, inner_path=PurePosixPath("/data"), filesystem=OsFs("/"), location=offset))
            offset += Point5D.zero(**{stack_axis: stack[-1].shape[stack_axis]})
        return stack

    seq_ds = SequenceDataSource(datasources=stack_h5s("z"), stack_axis="z")
    assert seq_ds.shape == Shape5D(x=5, y=4, c=3, z=3)
    data = seq_ds.retrieve(**slice_x_2_4__y_1_3)
    assert (expected_x_2_4__y_1_3.raw("xyzc") == data.raw("xyzc")).all()

    seq_ds = SequenceDataSource(datasources=stack_h5s("z"), stack_axis="z")
    data = seq_ds.retrieve(**slice_x_2_4__y_1_3)
    assert (expected_x_2_4__y_1_3.raw("xyzc") == data.raw("xyzc")).all()
Code example #25
File: test_datasource.py Project: ilastik/webilastik
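Verifies DataRoi.get_neighboring_tiles: the tiles surrounding a central 3x3 block are retrieved and matched against the expected arrays.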
def test_neighboring_tiles():
    # fmt: off
    arr = Array5D(np.asarray([
        [10, 11, 12,   20, 21, 22,   30],
        [13, 14, 15,   23, 24, 25,   33],
        [16, 17, 18,   26, 27, 28,   36],

        [40, 41, 42,   50, 51, 52,   60],
        [43, 44, 45,   53, 54, 55,   63],
        [46, 47, 48,   56, 57, 58,   66],

        [70, 71, 72,   80, 81, 82,   90],
        [73, 74, 75,   83, 84, 85,   93],
        [76, 77, 78,   86, 87, 88,   96],

        [0,   1,  2,    3,  4,  5,    6]], dtype=np.uint8), axiskeys="yx")

    ds = SkimageDataSource(path=create_png(arr), filesystem=OsFs("/"))

    fifties_slice = DataRoi(ds, x=(3, 6), y=(3, 6))
    expected_fifties_slice = Array5D(np.asarray([
        [50, 51, 52],
        [53, 54, 55],
        [56, 57, 58]
    ]), axiskeys="yx")
    # fmt: on

    top_slice = DataRoi(ds, x=(3, 6), y=(0, 3))
    bottom_slice = DataRoi(ds, x=(3, 6), y=(6, 9))

    right_slice = DataRoi(ds, x=(6, 7), y=(3, 6))
    left_slice = DataRoi(ds, x=(0, 3), y=(3, 6))

    # fmt: off
    fifties_neighbor_data = {
        top_slice: Array5D(np.asarray([
            [20, 21, 22],
            [23, 24, 25],
            [26, 27, 28]
        ]), axiskeys="yx"),

        right_slice: Array5D(np.asarray([
            [60],
            [63],
            [66]
        ]), axiskeys="yx"),

        bottom_slice: Array5D(np.asarray([
            [80, 81, 82],
            [83, 84, 85],
            [86, 87, 88]
        ]), axiskeys="yx"),

        left_slice: Array5D(np.asarray([
            [40, 41, 42],
            [43, 44, 45],
            [46, 47, 48]
        ]), axiskeys="yx"),
    }

    # fmt: on

    assert (fifties_slice.retrieve().raw("yx") == expected_fifties_slice.raw("yx")).all()

    for neighbor in fifties_slice.get_neighboring_tiles(tile_shape=Shape5D(x=3, y=3)):
        try:
            expected_slice = fifties_neighbor_data.pop(neighbor)
            assert (expected_slice.raw("yx") == neighbor.retrieve().raw("yx")).all()
        except KeyError:
            print("\nWas searching for", neighbor, "\n")
            for k in fifties_neighbor_data.keys():
                print("--->>> ", k)
    assert len(fifties_neighbor_data) == 0
Code example #26
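Chunk encoder that dumps an Array5D as Fortran-ordered xyzc bytes; the counterpart of the raw-chunk decoder in code example #9.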
    def encode(self, data: Array5D) -> bytes:
        return data.raw("xyzc").tobytes("F")
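For illustration, a minimal numpy-only sketch of the round trip between this encoder and the raw-chunk decoder from code example #9 (shape and values invented):

import numpy as np

# A made-up tile in "xyzc" order, standing in for Array5D.raw("xyzc")
tile_xyzc = np.arange(24, dtype=np.uint8).reshape(2, 3, 4, 1)

# Encode: dump the buffer in Fortran order, as encode() does above
raw_chunk = tile_xyzc.tobytes("F")

# Decode: reinterpret the bytes and reshape in Fortran order, as in code example #9
decoded = np.frombuffer(raw_chunk, dtype=np.dtype(np.uint8).newbyteorder("<"))
assert (decoded.reshape((2, 3, 4, 1), order="F") == tile_xyzc).all()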
Code example #27
File: test_datasource.py Project: ilastik/webilastik
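Module-level helpers from the datasource tests: a PNG fixture built from a raw array defined elsewhere in the module, and an equality check between a retrieved tile and a raw numpy array.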
png_image: PurePosixPath = create_png(Array5D(raw, axiskeys="yx"))

def tile_equals(tile: DataSource, axiskeys: str, raw: "np.ndarray[Any, Any]") -> bool:
    return (tile.retrieve().raw(axiskeys) == raw).all()


Code example #28
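Serializes labels, annotations, and the trained classifier into the pixel-classification group of an .ilp file: annotations are merged into per-datasource tiles under LabelSets, and each forest is copied into ClassifierForests along with feature names and known labels.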
    def populate_group(self, group: h5py.Group):
        LabelColors: "ndarray[Any, dtype[int64]]"  = np.asarray([label.color.rgba for label in self.labels], dtype=int64)

        # expected group keys to look like this:
        # ['Bookmarks', 'ClassifierFactory', 'LabelColors', 'LabelNames', 'PmapColors', 'StorageVersion', 'LabelSets', 'ClassifierForests']>
        bookmark = group.create_group("Bookmarks").create_dataset("0000", data=np.void(pickle.dumps([], 0))) # empty value is [], serialized with SerialPickleableSlot
        bookmark.attrs["version"] = 1
        group["ClassifierFactory"] = VIGRA_ILP_CLASSIFIER_FACTORY
        group["LabelColors"] = LabelColors
        group["LabelColors"].attrs["isEmpty"] = False
        group["LabelNames"] = [label.name.encode("utf8") for label in self.labels]
        group["LabelNames"].attrs["isEmpty"] = False
        group["PmapColors"] = LabelColors
        group["PmapColors"].attrs["isEmpty"] = False
        group["StorageVersion"] = "0.1".encode("utf8")

        merged_annotation_tiles: Dict[DataSource, Dict[Interval5D, Array5D]] = {}
        for label_class, label in enumerate(self.labels, start=1):
            for annotation in label.annotations:
                datasource = annotation.raw_data
                merged_tiles = merged_annotation_tiles.setdefault(datasource, {})

                for interval in annotation.interval.get_tiles(
                    tile_shape=datasource.tile_shape.updated(c=1), tiles_origin=datasource.interval.start.updated(c=0)
                ):
                    annotation_tile = annotation.cut(interval.clamped(annotation.interval))
                    tile = merged_tiles.setdefault(interval, Array5D.allocate(interval=interval, value=0, dtype=np.dtype("uint8")))
                    tile.set(annotation_tile.colored(np.uint8(label_class)), mask_value=0)

        LabelSets = group.create_group("LabelSets")
        for lane_index, (lane_datasource, blocks) in enumerate(merged_annotation_tiles.items()):
            assert isinstance(lane_datasource, FsDataSource) #FIXME? how do autocontext annotations work? They wouldn't be on FsDataSource
            axiskeys = lane_datasource.c_axiskeys_on_disk
            label_set = LabelSets.create_group(f"labels{lane_index:03}")
            for block_index, block in enumerate(blocks.values()):
                labels_dataset = label_set.create_dataset(f"block{block_index:04d}", data=block.raw(axiskeys))
                labels_dataset.attrs["blockSlice"] = "[" + ",".join(f"{slc.start}:{slc.stop}" for slc in block.interval.updated(c=0).to_slices(axiskeys)) + "]"
                labels_dataset.attrs["axistags"] = vigra.defaultAxistags(axiskeys).toJSON()
        if len(LabelSets.keys()) == 0:
            _ = LabelSets.create_group("labels000")  # empty labels still produce this in classic ilastik

        if self.classifier:
            # ['Forest0000', ..., 'Forest000N', 'feature_names', 'known_labels', 'pickled_type']
            ClassifierForests = group.create_group("ClassifierForests")

            feature_names: List[bytes] = []
            get_feature_extractor_order: Callable[[IlpFilter], int] = lambda ex: self.feature_classes.index(ex.__class__)
            for fe in sorted(self.classifier.feature_extractors, key=get_feature_extractor_order):
                for c in range(self.classifier.num_input_channels * fe.channel_multiplier):
                    feature_names.append(self.make_feature_ilp_name(fe, channel_index=c).encode("utf8"))

            for forest_index, forest_bytes in enumerate(self.classifier.forest_h5_bytes):
                forests_h5_path = dump_to_temp_file(forest_bytes)
                with h5py.File(forests_h5_path, "r") as f:
                    forest_group = f["/"]
                    assert isinstance(forest_group, h5py.Group)
                    ClassifierForests.copy(forest_group, f"Forest{forest_index:04}") # 'Forest0000', ..., 'Forest000N'

            ClassifierForests["feature_names"] = feature_names
            ClassifierForests["known_labels"] = np.asarray(self.classifier.classes).astype(np.uint32)
            ClassifierForests["pickled_type"] = b"clazyflow.classifiers.parallelVigraRfLazyflowClassifier\nParallelVigraRfLazyflowClassifier\np0\n."
Code example #29
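Returns a copy of the connected components enlarged by 'radius' (clamped to 'limits'), with the halo filled with zeros.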
    def enlarged(self, radius: Point5D, limits: Interval5D) -> "ConnectedComponents":
        """Enlarges the array by 'radius' and fills the halo with zeros"""
        haloed_roi = self.interval.enlarged(radius).clamped(limits)
        haloed_data = Array5D.allocate(haloed_roi, value=0, dtype=self.dtype)
        haloed_data.set(self)
        return ConnectedComponents.from_array5d(haloed_data, labels=self.labels)
Code example #30
File: test_datasource.py Project: ilastik/webilastik
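Test helper that saves an Array5D as a temporary PNG and returns its path.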
def create_png(array: Array5D) -> PurePosixPath:
    png_path = tempfile.mkstemp()[1] + ".png"
    skimage.io.imsave(png_path, array.raw("yxc"))
    return PurePosixPath(png_path)