def test_bioformats(
    tmp_path: Path,
    url: str,
    filename: str,
    kwargs: Dict,
    dtype: str,
    num_channels: int,
    size: Tuple[int, int, int],
) -> wk.Dataset:
    unzip_path = tmp_path / "unzip"
    download_and_unpack(url, unzip_path)
    ds = wk.Dataset(tmp_path / "ds", (1, 1, 1))
    with wk.utils.get_executor_for_args(None) as executor:
        l = ds.add_layer_from_images(
            str(unzip_path / filename),
            layer_name=filename,
            compress=True,
            executor=executor,
            use_bioformats=True,
            **kwargs,
        )
        assert l.dtype_per_channel == np.dtype(dtype)
        assert l.num_channels == num_channels
        assert l.bounding_box == wk.BoundingBox(topleft=(0, 0, 0), size=size)
    return ds
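test_bioformats relies on a download_and_unpack helper that is not shown in this excerpt. The following is a minimal sketch of what such a helper might look like, assuming the URL points to a zip archive; it is an illustration, not the test suite's actual implementation.

import zipfile
from io import BytesIO
from pathlib import Path
from urllib.request import urlopen


def download_and_unpack(url: str, target_path: Path) -> None:
    # Assumed behavior: fetch a zip archive and extract it into target_path.
    target_path.mkdir(parents=True, exist_ok=True)
    with urlopen(url) as response:
        archive = BytesIO(response.read())
    with zipfile.ZipFile(archive) as zip_file:
        zip_file.extractall(target_path)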
def test_repo_images(
    tmp_path: Path,
    path: str,
    kwargs: Dict,
    dtype: str,
    num_channels: int,
    size: Tuple[int, int, int],
) -> wk.Dataset:
    with wk.utils.get_executor_for_args(None) as executor:
        ds = wk.Dataset(tmp_path, (1, 1, 1))
        layer_name = "__".join(
            (path if isinstance(path, str) else str(path[0])).split("/")[1:])
        l = ds.add_layer_from_images(
            path,
            layer_name=layer_name,
            compress=True,
            executor=executor,
            **kwargs,
        )
        assert l.dtype_per_channel == np.dtype(dtype)
        assert l.num_channels == num_channels
        assert l.bounding_box == wk.BoundingBox(topleft=(0, 0, 0), size=size)
        if isinstance(l, wk.SegmentationLayer):
            assert l.largest_segment_id > 0
    return ds
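Both test functions above expect their arguments to be supplied by pytest parametrization, which this excerpt does not include. Below is a hypothetical sketch for test_repo_images; the image path, extra kwargs, expected dtype, channel count, and size are placeholders, not the suite's real fixtures.

import pytest


@pytest.mark.parametrize(
    "path, kwargs, dtype, num_channels, size",
    [
        # Placeholder case only; the real suite defines its own images and expectations.
        ("testdata/tiff/test.*.tiff", {}, "uint8", 1, (512, 512, 32)),
    ],
)
def test_repo_images(tmp_path, path, kwargs, dtype, num_channels, size):
    ...  # body as defined above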
def test_compare_tifffile(tmp_path: Path) -> None:
    ds = wk.Dataset(tmp_path, (1, 1, 1))
    l = ds.add_layer_from_images(
        "testdata/tiff/test.*.tiff",
        layer_name="compare_tifffile",
        compress=True,
        category="segmentation",
    )
    data = l.get_finest_mag().read()[0, :, :]
    for z_index in range(0, data.shape[-1]):
        with TiffFile("testdata/tiff/test.0000.tiff") as tif_file:
            comparison_slice = tif_file.asarray().T
        assert np.array_equal(data[:, :, z_index], comparison_slice)
Example #4
def main() -> None:
    # load your data - we use an example 3D dataset here
    img = data.cells3d()  # (z, c, y, x)

    # make sure that the dimensions of your data are in the right order
    # we expect the following dimensions: Channels, X, Y, Z.
    img = np.transpose(img, [1, 3, 2, 0])

    # choose a name for our dataset
    time_str = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
    name = f"cell_{time_str}"

    # voxel_size is defined in nm
    ds = wk.Dataset(name, voxel_size=(260, 260, 290))

    ds.default_view_configuration = DatasetViewConfiguration(zoom=0.35)

    # The example microscopy data has two channels
    # Channel 0 contains cell membranes, channel 1 contains nuclei.
    layer_membranes = ds.add_layer(
        "cell membranes",
        COLOR_CATEGORY,
        dtype_per_layer=img.dtype,
    )

    layer_membranes.add_mag(1, compress=True).write(img[0, :])

    layer_membranes.default_view_configuration = LayerViewConfiguration(
        color=(17, 212, 17), intensity_range=(0, 16000))

    layer_nuclei = ds.add_layer(
        "nuclei",
        COLOR_CATEGORY,
        dtype_per_layer=img.dtype,
    )

    layer_nuclei.add_mag(1, compress=True).write(img[1, :])

    layer_nuclei.default_view_configuration = LayerViewConfiguration(
        color=(212, 17, 17), intensity_range=(3000, 30000))

    remote_dataset = ds.upload()
    url = remote_dataset.url
    print(f"Successfully uploaded {url}")
Example #5
@contextmanager
def temporary_annotation_view(volume_annotation_path: Path) -> Iterator[Layer]:
    """
    Given a volume annotation path, create a temporary dataset which
    contains the volume annotation via a symlink. Yield the layer
    so that one can work with the annotation as a wk.Dataset.
    """

    with TemporaryDirectory() as tmp_annotation_dir:
        tmp_annotation_dataset_path = (Path(tmp_annotation_dir) /
                                       "tmp_annotation_dataset")

        input_annotation_dataset = wk.Dataset(str(tmp_annotation_dataset_path),
                                              voxel_size=(1, 1, 1),
                                              exist_ok=True)

        # Ideally, the following code would be used, but there are two problems:
        # - save_volume_annotation cannot deal with the
        #   new named volume annotation layers, yet
        # - save_volume_annotation tries to read the entire data (beginning from 0, 0, 0)
        #   to infer the largest_segment_id which can easily exceed the available RAM.
        #
        # volume_annotation = open_annotation(volume_annotation_path)
        # input_annotation_layer = volume_annotation.save_volume_annotation(
        #     input_annotation_dataset, "volume_annotation"
        # )

        os.symlink(volume_annotation_path,
                   tmp_annotation_dataset_path / "segmentation")
        input_annotation_layer = input_annotation_dataset.add_layer_for_existing_files(
            layer_name="segmentation",
            category="segmentation",
            # This value is incorrect, but not relevant for globalize_floodfill.
            largest_segment_id=0,
        )

        yield input_annotation_layer
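Since temporary_annotation_view is a contextlib context manager that yields the annotation's segmentation layer, a minimal usage sketch might look like this; the annotation path below is a placeholder.

from pathlib import Path

# Placeholder path; point this at an actual volume annotation on disk.
volume_annotation_path = Path("path/to/volume_annotation")

with temporary_annotation_view(volume_annotation_path) as segmentation_layer:
    # Work with the annotation like any other layer, e.g. read its finest mag.
    data = segmentation_layer.get_finest_mag().read()
    print(data.shape)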
Example #6
def main() -> None:
    # We are going to use a public demo annotation for this example

    annotation = wk.Annotation.download(
        "https://webknossos.org/annotations/616457c2010000870032ced4")

    # Step 1: Download the training data annotation from webKnossos to our local computer
    training_data_bbox = annotation.user_bounding_boxes[0]  # type: ignore[index]
    time_str = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
    new_dataset_name = annotation.dataset_name + f"_segmented_{time_str}"
    dataset = annotation.get_remote_base_dataset(
        webknossos_url="https://webknossos.org")

    with annotation.temporary_volume_layer_copy() as volume_annotation_layer:
        mag = volume_annotation_layer.get_finest_mag().mag
        volume_annotation_data = volume_annotation_layer.mags[mag].read(
            absolute_bounding_box=training_data_bbox)

    color_mag_view = dataset.layers["color"].mags[mag]

    # Step 2: Initialize a machine learning model to segment our dataset
    features_func = partial(feature.multiscale_basic_features,
                            multichannel=True,
                            edges=False)
    segmenter = TrainableSegmenter(features_func=features_func)

    # Step 3: Manipulate our data to fit the ML model and start training on
    # data from our annotated training data bounding box
    print("Starting training…")
    img_data_train = color_mag_view.read(
        absolute_bounding_box=training_data_bbox
    )  # wk data has dimensions (Channels, X, Y, Z)
    # move channels to last dimension, remove z dimension to match skimage's shape
    X_train = np.moveaxis(np.squeeze(img_data_train), 0, -1)
    Y_train = np.squeeze(volume_annotation_data)

    segmenter.fit(X_train, Y_train)

    # Step 4: Use our trained model and predict a class for each pixel in the dataset
    # to get a full segmentation of the data
    print("Starting prediction…")
    X_predict = np.moveaxis(np.squeeze(color_mag_view.read()), 0, -1)
    Y_predicted = segmenter.predict(X_predict)
    segmentation = Y_predicted[:, :, None]  # adds z dimension
    assert segmentation.max() < 256
    segmentation = segmentation.astype("uint8")

    # Step 5: Bundle everything into a webKnossos layer and upload it to webKnossos for viewing and further work
    with TemporaryDirectory() as tempdir:
        new_dataset = wk.Dataset(tempdir,
                                 voxel_size=dataset.voxel_size,
                                 name=new_dataset_name)
        segmentation_layer = new_dataset.add_layer(
            "segmentation",
            wk.SEGMENTATION_CATEGORY,
            segmentation.dtype,
            compressed=True,
            largest_segment_id=int(segmentation.max()),
        )
        segmentation_layer.bounding_box = dataset.layers["color"].bounding_box
        segmentation_layer.add_mag(mag, compress=True).write(segmentation)
        segmentation_layer.downsample(sampling_mode="constant_z")

        remote_ds = new_dataset.upload(
            layers_to_link=[dataset.layers["color"]]
            if "PYTEST_CURRENT_TEST" not in os.environ else None)

    url = remote_ds.url
    print(f"Successfully uploaded {url}")
def main() -> None:
    #####################
    # Opening a dataset #
    #####################

    dataset = wk.Dataset.open("testdata/simple_wkw_dataset")
    # Assuming that the dataset has a layer "color"
    # and the layer has the magnification 1
    layer = dataset.get_layer("color")
    mag1 = layer.get_mag("1")

    ######################
    # Creating a dataset #
    ######################

    dataset = wk.Dataset("testoutput/my_new_dataset", voxel_size=(1, 1, 1))
    layer = dataset.add_layer(layer_name="color",
                              category="color",
                              dtype_per_channel="uint8",
                              num_channels=3)
    mag1 = layer.add_mag("1")
    mag2 = layer.add_mag("2")

    ##########################
    # Writing into a dataset #
    ##########################

    # The properties are updated automatically
    # when the written data exceeds the bounding box in the properties
    mag1.write(
        absolute_offset=(10, 20, 30),
        # assuming the layer has 3 channels:
        data=(np.random.rand(3, 512, 512, 32) * 255).astype(np.uint8),
    )

    mag2.write(
        absolute_offset=(10, 20, 30),
        data=(np.random.rand(3, 256, 256, 16) * 255).astype(np.uint8),
    )

    ##########################
    # Reading from a dataset #
    ##########################

    data_in_mag1 = mag1.read()  # the offset and size from the properties are used
    data_in_mag1_subset = mag1.read(absolute_offset=(10, 20, 30),
                                    size=(512, 512, 32))

    data_in_mag2 = mag2.read()
    data_in_mag2_subset = mag2.read(absolute_offset=(10, 20, 30),
                                    size=(512, 512, 32))
    assert data_in_mag2_subset.shape == (3, 256, 256, 16)

    #####################
    # Copying a dataset #
    #####################

    copy_of_dataset = dataset.copy_dataset(
        "testoutput/copy_of_dataset",
        chunk_size=8,
        chunks_per_shard=8,
        compress=True,
    )
    new_layer = dataset.add_layer(
        layer_name="segmentation",
        category="segmentation",
        dtype_per_channel="uint8",
        largest_segment_id=0,
    )
    # Link a layer of the initial dataset to the copy:
    sym_layer = copy_of_dataset.add_symlink_layer(new_layer)
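    # Sketch (assumption, not part of the original example): re-open the copy
    # and check that both the copied color layer and the linked segmentation
    # layer are registered in its properties.
    reopened_copy = wk.Dataset.open("testoutput/copy_of_dataset")
    assert "color" in reopened_copy.layers
    assert "segmentation" in reopened_copy.layers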