Example #1
def test_writing_to_precomputed_chunks(tmp_path: Path, data: Array5D):
    datasource = ArrayDataSource.from_array5d(data,
                                              tile_shape=Shape5D(x=10, y=10))
    scale = PrecomputedChunksScale.from_datasource(datasource=datasource,
                                                   key=Path("my_test_data"),
                                                   encoding=RawEncoder())
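    # Assemble the top-level precomputed "info" metadata around the single scale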
    info = PrecomputedChunksInfo(
        data_type=datasource.dtype,
        type_="image",
        num_channels=datasource.shape.c,
        scales=tuple([scale]),
    )
    sink_path = Path("mytest.precomputed")
    filesystem = OsFs(tmp_path.as_posix())

    datasink = PrecomputedChunksSink.create(
        filesystem=filesystem,
        base_path=sink_path,
        info=info,
    ).scale_sinks[0]

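    # Stream every tile of the source through the sink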
    for tile in datasource.roi.get_datasource_tiles():
        datasink.write(tile.retrieve())

    precomp_datasource = PrecomputedChunksDataSource(
        path=sink_path, filesystem=filesystem, resolution=scale.resolution)
    reloaded_data = precomp_datasource.retrieve()
    assert reloaded_data == data
Example #2
def test_writing_to_precomputed_chunks():
    tmp_path = create_tmp_dir(prefix="test_writing_to_precomputed_chunks")
    datasource = ArrayDataSource(data=data, tile_shape=Shape5D(x=10, y=10))
    scale = PrecomputedChunksScale.from_datasource(
        datasource=datasource,
        key=PurePosixPath("my_test_data"),
        encoding=RawEncoder())
    sink_path = PurePosixPath("mytest.precomputed")
    filesystem = OsFs(tmp_path.as_posix())

    datasink = PrecomputedChunksScaleSink(
        filesystem=filesystem,
        info_dir=sink_path,
        scale=scale,
        dtype=datasource.dtype,
        num_channels=datasource.shape.c,
    )
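    # create() reports failure as a returned Exception value rather than raising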
    creation_result = datasink.create()
    if isinstance(creation_result, Exception):
        raise creation_result

    for tile in datasource.roi.get_datasource_tiles():
        datasink.write(tile.retrieve())

    precomp_datasource = PrecomputedChunksDataSource(
        path=sink_path, filesystem=filesystem, resolution=scale.resolution)
    reloaded_data = precomp_datasource.retrieve()
    assert reloaded_data == data
Example #3
def test_bucket_read_write():
    raw_data_source = get_sample_c_cells_datasource()
    bucket_fs = get_test_output_bucket_fs()

    precomp_path = PurePosixPath("c_cells_1.precomputed")
    sink = PrecomputedChunksScaleSink(
        info_dir=precomp_path,
        filesystem=bucket_fs,
        num_channels=raw_data_source.shape.c,
        scale=PrecomputedChunksScale(
            key=PurePosixPath("exported_data"),
            size=(raw_data_source.shape.x, raw_data_source.shape.y,
                  raw_data_source.shape.z),
            chunk_sizes=tuple([
                (raw_data_source.tile_shape.x, raw_data_source.tile_shape.y,
                 raw_data_source.tile_shape.z)
            ]),
            encoding=RawEncoder(),
            voxel_offset=(raw_data_source.location.x,
                          raw_data_source.location.y,
                          raw_data_source.location.z),
            resolution=raw_data_source.spatial_resolution),
        dtype=raw_data_source.dtype,
    )

    sink_writer = sink.create()
    assert not isinstance(sink_writer, Exception)

    assert bucket_fs.exists(precomp_path.joinpath("info").as_posix())
    assert not bucket_fs.exists(
        precomp_path.joinpath("i_dont_exist").as_posix())

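    # Write tiles in parallel; the partial (and its sink_writer) is pickled into each worker process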
    with ProcessPoolExecutor() as executor:
        _ = list(
            executor.map(partial(_write_data, sink_writer=sink_writer),
                         raw_data_source.roi.get_datasource_tiles()))

    data_proxy_source = PrecomputedChunksDataSource(
        path=precomp_path,
        filesystem=bucket_fs,
        resolution=raw_data_source.spatial_resolution)

    retrieved_data = data_proxy_source.retrieve()
    assert np.all(
        retrieved_data.raw("yxc") == raw_data_source.retrieve().raw("yxc"))
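
_write_data is not shown in this excerpt; a minimal sketch of the mapped helper, consistent with the write loops in the other examples (the parameter types are assumptions):

def _write_data(tile, sink_writer) -> None:
    # hypothetical helper: fetch one tile and push it through the sink writer
    sink_writer.write(tile.retrieve())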
Example #4
def try_get_datasources_from_url(
    *,
    url: Union[Url, str],
    ebrains_user_token: Optional[UserToken] = None,
    allowed_protocols: Sequence[Protocol] = (Protocol.HTTP, Protocol.HTTPS)
) -> "Sequence[FsDataSource] | Exception":
    if isinstance(url, str):
        parsing_result = parse_url(url)
        if isinstance(parsing_result, UsageError):
            return parsing_result
        url = parsing_result

    if url.protocol not in allowed_protocols:
        return Exception(f"Disallowed protocol: {url.protocol} in {url}")

    if SkimageDataSource.supports_url(url):
        return SkimageDataSource.from_url(url)
    if PrecomputedChunksDataSource.supports_url(url):
        return PrecomputedChunksDataSource.from_url(url)
    return Exception(f"Could not open url {url}")
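
A minimal usage sketch for the resolver above, mirroring how Example #6 consumes it (the sample URL is taken from Example #6):

data_url = Url.parse(
    "precomputed://https://app.ilastik.org/public/images/c_cells_2.precomputed")
assert data_url is not None
datasources = try_get_datasources_from_url(url=data_url)
if isinstance(datasources, Exception):
    raise datasources  # errors come back as values, not raised exceptions
ds = datasources[0]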
Example #5
def test_writing_to_offset_precomputed_chunks():
    tmp_path = create_tmp_dir(
        prefix="test_writing_to_offset_precomputed_chunks")
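    # Shift the data so its origin sits at (x=1000, y=1000)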
    data_at_1000_1000 = data.translated(
        Point5D(x=1000, y=1000) - data.location)
    datasource = ArrayDataSource(data=data_at_1000_1000,
                                 tile_shape=Shape5D(x=10, y=10))
    scale = PrecomputedChunksScale.from_datasource(
        datasource=datasource,
        key=PurePosixPath("my_test_data"),
        encoding=RawEncoder())
    sink_path = PurePosixPath("mytest.precomputed")
    filesystem = OsFs(tmp_path.as_posix())

    print(f"\n\n will write to '{filesystem.geturl(sink_path.as_posix())}' ")

    datasink = PrecomputedChunksScaleSink(
        filesystem=filesystem,
        info_dir=sink_path,
        scale=scale,
        num_channels=datasource.shape.c,
        dtype=datasource.dtype,
    )
    creation_result = datasink.create()
    if isinstance(creation_result, Exception):
        raise creation_result

    for tile in datasource.roi.get_datasource_tiles():
        datasink.write(tile.retrieve())

    precomp_datasource = PrecomputedChunksDataSource(
        path=sink_path, filesystem=filesystem, resolution=scale.resolution)

    reloaded_data = precomp_datasource.retrieve(
        interval=data_at_1000_1000.interval)
    assert (reloaded_data.raw("xyz") == data.raw("xyz")).all()
Example #6
async def main():
    ilastik_root_url = Url.parse("https://app.ilastik.org/")
    assert ilastik_root_url is not None
    data_url = Url.parse(
        "precomputed://https://app.ilastik.org/public/images/c_cells_2.precomputed"
    )
    assert data_url is not None
    datasources = try_get_datasources_from_url(url=data_url)
    if isinstance(datasources, Exception):
        raise datasources
    assert not isinstance(datasources, Exception)
    ds = datasources[0]
    token = UserToken.from_environment()
    assert isinstance(token, UserToken)

    async with aiohttp.ClientSession(
            cookies={EbrainsLogin.AUTH_COOKIE_KEY: token.access_token
                     }) as session:
        print(f"Creating new session--------------")
        async with session.post(ilastik_root_url.concatpath("api/session").raw,
                                json={"session_duration_minutes":
                                      15}) as response:
            response.raise_for_status()
            session_data: Dict[str, Any] = await response.json()
            session_id = session_data["id"]
        print(
            f"Done creating session: {json.dumps(session_data)} <<<<<<<<<<<<<<<<<<"
        )

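        # Poll until the remote session reports "ready", then grab its URL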
        for _ in range(10):
            response = await session.get(
                ilastik_root_url.concatpath(f"api/session/{session_id}").raw)
            response.raise_for_status()
            session_status = await response.json()
            if session_status["status"] == "ready":
                session_url = session_status["url"]
                break
            print(f"Session {session_id} is notready yet")
            _ = await asyncio.sleep(2)
        else:
            raise RuntimeError("Given up waiting on session")

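        # All applet calls go over a single websocket as RPCPayload JSON messages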

        async with session.ws_connect(f"{session_url}/ws") as ws:
            _ = asyncio.get_event_loop().create_task(read_server_status(ws))
            print("sending some feature extractors=======")
            await ws.send_json(
                RPCPayload(applet_name="feature_selection_applet",
                           method_name="add_feature_extractors",
                           arguments={
                               "feature_extractors":
                               tuple(fe.to_json_value()
                                     for fe in get_sample_feature_extractors())
                           }).to_json_value())
            print("done sending feature extractors<<<<<")

            print("sending some annotations=======")
            default_label_names = ["Foreground", "Background"]
            for label_name, label in zip(
                    default_label_names,
                    get_sample_c_cells_pixel_annotations(
                        override_datasource=ds)):
                for a in label.annotations:
                    await ws.send_json(
                        RPCPayload(applet_name="brushing_applet",
                                   method_name="add_annotation",
                                   arguments={
                                       "label_name": label_name,
                                       "annotation": a.to_json_data(),
                                   }).to_json_value())

            print("done sending annotations<<<<<")
            await asyncio.sleep(2)

            print("Enabling live update=======")
            await ws.send_json(
                RPCPayload(applet_name="pixel_classification_applet",
                           method_name="set_live_update",
                           arguments={
                               "live_update": True
                           }).to_json_value())
            await asyncio.sleep(2)

            # from base64 import b64encode
            # encoded_ds: str = b64encode(json.dumps(ds.to_json_value()).encode("utf8"), altchars=b'-_').decode("utf8")

            # response_tasks = {}
            # for tile in ds.roi.get_tiles(tile_shape=Shape5D(x=256, y=256, c=2), tiles_origin=Point5D.zero()):
            #     url = f"{session_url}/predictions/raw_data={encoded_ds}/generation={classifier_generation}/data/{tile.x[0]}-{tile.x[1]}_{tile.y[0]}-{tile.y[1]}_0-1"
            #     print(f"---> Requesting {url}")
            #     response_tasks[tile] = session.get(url)

            # for tile, resp in response_tasks.items():
            #     async with resp as response:
            #         print("Status:", response.status)
            #         print("Content-type:", response.headers['content-type'])

            #         if response.status // 100 != 2:
            #             raise Exception(f"Error: {(await response.content.read()).decode('utf8')}")

            #         tile_bytes = await response.content.read()
            #         print(f"Got predictions<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")

            #         raw_data = np.frombuffer(tile_bytes, dtype=np.uint8).reshape(2, tile.shape.y, tile.shape.x)
            #         Array5D(raw_data, axiskeys="cyx").show_channels()

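            # Export outputs go to the EBRAINS hbp-image-service bucket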
            hbp_image_service_bucket_fs = BucketFs(
                bucket_name="hbp-image-service",
                ebrains_user_token=UserToken.get_global_token_or_raise(),
                prefix=PurePosixPath("/"),
            )

            predictions_export_datasink = create_precomputed_chunks_sink(
                shape=ds.shape.updated(c=2),
                dtype=np.dtype("float32"),
                chunk_size=ds.tile_shape.updated(c=2),
                fs=hbp_image_service_bucket_fs,
            )

            print(f"Sending predictions job request??????")
            await ws.send_json(
                RPCPayload(applet_name="export_applet",
                           method_name="start_export_job",
                           arguments={
                               "datasource":
                               ds.to_json_value(),
                               "datasink":
                               predictions_export_datasink.to_json_value(),
                           }).to_json_value())

            simple_segmentation_datasinks = [
                create_precomputed_chunks_sink(
                    shape=ds.shape.updated(c=3),
                    dtype=np.dtype("uint8"),
                    chunk_size=ds.tile_shape.updated(c=3),
                    fs=hbp_image_service_bucket_fs),
                create_precomputed_chunks_sink(
                    shape=ds.shape.updated(c=3),
                    dtype=np.dtype("uint8"),
                    chunk_size=ds.tile_shape.updated(c=3),
                    fs=hbp_image_service_bucket_fs),
            ]

            print(f"Sending simple segmentation job request??????")
            await ws.send_json(
                RPCPayload(applet_name="export_applet",
                           method_name="start_simple_segmentation_export_job",
                           arguments={
                               "datasource":
                               ds.to_json_value(),
                               "datasinks":
                               tuple(sink.to_json_value()
                                     for sink in simple_segmentation_datasinks),
                           }).to_json_value())

            print(f"---> Job successfully scheduled? Waiting for a while")
            await asyncio.sleep(15)
            print(f"Done waiting. Checking outputs")

            predictions_output = PrecomputedChunksDataSource(
                filesystem=hbp_image_service_bucket_fs,
                path=predictions_export_datasink.path,
                resolution=(1, 1, 1))
            for tile in predictions_output.roi.get_datasource_tiles():
                tile.retrieve().as_uint8(normalized=True)  #.show_channels()

            segmentation_output_1 = PrecomputedChunksDataSource(
                filesystem=hbp_image_service_bucket_fs,
                path=simple_segmentation_datasinks[1].path,
                resolution=(1, 1, 1))
            for tile in segmentation_output_1.roi.get_datasource_tiles():
                tile.retrieve()  #.show_images()

            close_url = f"{session_url}/close"
            print(f"Closing session py sending delete to {close_url}")
            r = await session.delete(close_url)
            r.raise_for_status()

        global finished
        finished = True
Example #7
def to_datasource(self) -> PrecomputedChunksDataSource:
    return PrecomputedChunksDataSource(
        filesystem=self.filesystem,
        path=self.info_dir,
        resolution=self.scale.resolution,
    )
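
A hedged sketch of how this helper composes with Example #2 (assuming to_datasource is a method of PrecomputedChunksScaleSink, as its attributes suggest):

# after the write loop in Example #2:
reloaded_data = datasink.to_datasource().retrieve()
assert reloaded_data == data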
Example #8
def test_pixel_classification_workflow():
    executor = get_executor(hint="server_tile_handler")
    priority_executor = PriorityExecutor(executor=executor,
                                         max_active_job_steps=8)

    workflow = PixelClassificationWorkflow(
        on_async_change=lambda: print(
            json.dumps(workflow.export_applet._get_json_state(), indent=4)),
        executor=executor,
        priority_executor=priority_executor,
    )

    # GUI turns on live update
    _ = workflow.pixel_classifier_applet.set_live_update(dummy_prompt,
                                                         live_update=True)

    # GUI creates some feature extractors

    all_feature_extractors: List[IlpFilter] = []
    for scale in [0.3, 0.7, 1.0, 1.6, 3.5, 5.0, 10.0][0:3]:
        all_feature_extractors.append(
            IlpGaussianSmoothing(ilp_scale=scale, axis_2d="z"))
        all_feature_extractors.append(
            IlpLaplacianOfGaussian(ilp_scale=scale, axis_2d="z"))
        all_feature_extractors.append(
            IlpGaussianGradientMagnitude(ilp_scale=scale, axis_2d="z"))
        all_feature_extractors.append(
            IlpDifferenceOfGaussians(ilp_scale=scale, axis_2d="z"))
        all_feature_extractors.append(
            IlpStructureTensorEigenvalues(ilp_scale=scale, axis_2d="z"))
        all_feature_extractors.append(
            IlpHessianOfGaussianEigenvalues(ilp_scale=scale, axis_2d="z"))

    _ = workflow.feature_selection_applet.add_feature_extractors(
        user_prompt=dummy_prompt,
        feature_extractors=all_feature_extractors,
    )

    pixel_annotations = get_sample_c_cells_pixel_annotations()
    for label_name, label in zip(workflow.brushing_applet.label_names(),
                                 pixel_annotations):
        for a in label.annotations:
            result = workflow.brushing_applet.add_annotation(
                user_prompt=dummy_prompt,
                label_name=label_name,
                annotation=a,
            )
            assert result.is_ok()

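    # Training happens asynchronously; wait until a classifier is available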
    while workflow.pixel_classifier_applet.pixel_classifier() is None:
        time.sleep(0.2)

    classifier = workflow.pixel_classifier_applet.pixel_classifier()
    assert classifier is not None

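    # Save the project to an .ilp file and reload it to verify the round-trip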
    _ = workflow.save_project(fs=test_output_osfs,
                              path=PurePosixPath("blas.ilp"))

    url = Url.parse(test_output_osfs.geturl('blas.ilp'))
    assert url is not None

    loaded_workflow = PixelClassificationWorkflow.from_ilp(
        ilp_path=Path(url.path),
        on_async_change=lambda: print(
            json.dumps(workflow.export_applet._get_json_state(), indent=4)),
        executor=executor,
        priority_executor=priority_executor,
        allowed_protocols=[Protocol.FILE],
    )
    print("what")
    print(loaded_workflow)
    assert isinstance(loaded_workflow, PixelClassificationWorkflow)
    print(
        f"Loaded workflow and atete pixel aplet description is {loaded_workflow.pixel_classifier_applet._state.description}"
    )

    # # calculate predictions on an entire data source
    raw_data_source = get_sample_c_cells_datasource()
    # preds_future = executor.submit(classifier.compute, raw_data_source.roi)
    # local_predictions = preds_future.result()
    # local_predictions.as_uint8().show_channels()

    # # calculate predictions on just a piece of arbitrary data
    # exported_tile = executor.submit(classifier.compute, DataRoi(datasource=raw_data_source, x=(100, 200), y=(100, 200)))
    # exported_tile.result().show_channels()

    ###################################

    # run an export job
    output_fs = get_test_output_osfs()
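    # Predictions sink: one float32 channel per predicted class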
    predictions_export_datasink = create_precomputed_chunks_sink(
        shape=raw_data_source.shape.updated(c=classifier.num_classes),
        dtype=np.dtype("float32"),
        chunk_size=raw_data_source.tile_shape.updated(
            c=classifier.num_classes),
        fs=output_fs)

    print(f"Sending predictions job request??????")
    result = workflow.export_applet.start_export_job(
        datasource=raw_data_source, datasink=predictions_export_datasink)
    assert result is None

    print(f"---> Job successfully scheduled? Waiting for a while")
    wait_until_jobs_completed(workflow=workflow)
    print(f"Done waiting. Checking outputs")

    predictions_output = PrecomputedChunksDataSource(
        filesystem=output_fs,
        path=predictions_export_datasink.path,
        resolution=(1, 1, 1))
    for tile in predictions_output.roi.get_datasource_tiles():
        _ = tile.retrieve().cut(c=1).as_uint8(
            normalized=True)  #.show_channels()

##################################

    simple_segmentation_datasinks = [
        create_precomputed_chunks_sink(
            shape=raw_data_source.shape.updated(c=3),
            dtype=np.dtype("uint8"),
            chunk_size=raw_data_source.tile_shape.updated(c=3),
            fs=output_fs),
        create_precomputed_chunks_sink(
            shape=raw_data_source.shape.updated(c=3),
            dtype=np.dtype("uint8"),
            chunk_size=raw_data_source.tile_shape.updated(c=3),
            fs=output_fs),
    ]

    print(f"Sending simple segmentation job request??????")
    result = workflow.export_applet.start_simple_segmentation_export_job(
        datasource=raw_data_source,
        datasinks=simple_segmentation_datasinks,
    )

    print(f"---> Job successfully scheduled? Waiting for a while")
    wait_until_jobs_completed(workflow=workflow)
    print(f"Done waiting. Checking outputs")

    segmentation_output_1 = PrecomputedChunksDataSource(
        filesystem=output_fs,
        path=simple_segmentation_datasinks[1].path,
        resolution=(1, 1, 1))
    for tile in segmentation_output_1.roi.get_datasource_tiles():
        _ = tile.retrieve()  #.show_images()

####################################

    priority_executor.shutdown()
                               choices=["brain", "c_cells"],
                               default="brain")
    _ = argparser.add_argument("--num-tiles")

    args = argparser.parse_args()

    executor: Executor = get_executor(hint="server_tile_handler")

    selected_feature_extractors: Sequence[
        JsonableFeatureExtractor] = args.extractors
    num_tiles = None if args.num_tiles is None else int(args.num_tiles)

    mouse_datasources: List[DataSource] = [
        PrecomputedChunksDataSource(
            filesystem=OsFs(
                Path(__file__).joinpath("../../public/images/").as_posix()),
            path=PurePosixPath(f"mouse{i}.precomputed"),
            resolution=(1, 1, 1)) for i in range(1, 3 + 1)
    ]

    if args.datasource == "brain":
        datasource = PrecomputedChunksDataSource(
            filesystem=OsFs(
                Path(__file__).joinpath("../../public/images/").as_posix()),
            path=PurePosixPath(f"mouse1.precomputed"),
            resolution=(1, 1, 1))
        class1_annotations = [
            Annotation.from_voxels(
                voxels=[
                    Point5D(x=2156, y=1326, z=0),
                    Point5D(x=2157, y=1326, z=0),