def export_to_bioimageio(checkpoint, input_, output, affs_to_bd,
                         additional_formats):
    """Export a trained ISBI2012 checkpoint to the bioimage.io model format.

    Args:
        checkpoint: path to the trained checkpoint; its basename encodes
            the dimensionality ("3d") and the output type ("affinity").
        input_: path to the data used as example input for the model.
        output: output folder for the exported model.
        affs_to_bd: whether to add a postprocessing step that converts
            predicted affinities to boundaries.
        additional_formats: additional weight formats to export (may be None).
    """
    ckpt_name = os.path.split(checkpoint)[1]

    # The checkpoint name encodes whether this is a 2d or 3d model.
    ndim = 3 if "3d" in ckpt_name else 2
    input_data = _load_data(input_, ndim)

    # If affinities are converted to boundaries, the exported model
    # effectively becomes a boundary model.
    is_aff_model = "affinity" in ckpt_name
    if is_aff_model and affs_to_bd:
        postprocessing = f"affinities_to_boundaries{ndim}d"
        is_aff_model = False
    else:
        postprocessing = None

    if is_aff_model:
        # Offsets for mutex watershed postprocessing of the affinity channels.
        offsets = [[-1, 0], [0, -1], [-3, 0], [0, -3], [-9, 0], [0, -9],
                   [-27, 0], [0, -27]]
        config = {"mws": {"offsets": offsets}}
    else:
        config = {}

    name = _get_name(is_aff_model, ndim)
    tags = [
        "unet", "neurons", "instance-segmentation", "electron-microscopy",
        "isbi2012-challenge"
    ]
    # FIX: the prediction-type tags were swapped; an affinity model must be
    # tagged "affinity-prediction" (consistent with the citation logic below,
    # which selects model_output="affinities" for an affinity model).
    tags += ["affinity-prediction"] if is_aff_model else ["boundary-prediction"]

    cite = get_default_citations(
        model="UNet2d" if ndim == 2 else "AnisotropicUNet",
        model_output="affinities" if is_aff_model else "boundaries")
    cite["data"] = "https://doi.org/10.3389/fnana.2015.00142"
    doc = _get_doc(is_aff_model, ndim)

    if additional_formats is None:
        additional_formats = []

    export_biomageio_model(
        checkpoint,
        output,
        input_data,
        name=name,
        authors=[{
            "name": "Constantin Pape; @constantinpape"
        }],
        tags=tags,
        license="CC-BY-4.0",
        documentation=doc,
        git_repo="https://github.com/constantinpape/torch-em.git",
        cite=cite,
        model_postprocessing=postprocessing,
        input_optional_parameters=False,
        # custom deepimagej fields are only needed for torchscript export
        for_deepimagej="torchscript" in additional_formats,
        links=[get_bioimageio_dataset_id("isbi2012")],
        config=config)
    add_weight_formats(output, additional_formats)
# --- Example 2 ---
def export_to_bioimageio(checkpoint, output, input_, affs_to_bd,
                         additional_formats):
    """Export a trained MoNuSeg checkpoint to the bioimage.io model format.

    Args:
        checkpoint: path to the trained checkpoint; its basename encodes
            the output type ("affinity").
        output: output folder for the exported model.
        input_: path to an RGB example image (may be None).
        affs_to_bd: whether to add a postprocessing step that converts
            predicted affinities to boundaries.
        additional_formats: additional weight formats to export (may be None).
    """
    ckpt_name = os.path.split(checkpoint)[1]
    if input_ is None:
        input_data = None
    else:
        # Crop the example image and move channels first: (3, H, W).
        input_data = imageio.imread(input_)
        input_data = input_data[:512, :512]
        input_data = input_data.transpose((2, 0, 1))
        assert input_data.shape[0] == 3, f"{input_data.shape}"

    # If affinities are converted to boundaries, the exported model
    # effectively becomes a boundary model.
    is_aff_model = "affinity" in ckpt_name
    if is_aff_model and affs_to_bd:
        postprocessing = "affinities_with_foreground_to_boundaries2d"
        is_aff_model = False
    else:
        postprocessing = None

    name = _get_name(is_aff_model)
    tags = ["unet", "instance-segmentation", "nuclei", "whole-slide-imaging"]
    # FIX: the prediction-type tags were swapped; an affinity model must be
    # tagged "affinity-prediction" (consistent with the citation logic below,
    # which selects model_output="affinities" for an affinity model).
    tags += ["affinity-prediction"] if is_aff_model else ["boundary-prediction"]

    # eventually we should refactor the citation logic
    cite = get_default_citations(
        model="UNet2d",
        model_output="affinities" if is_aff_model else "boundaries")
    cite["data"] = "https://ieeexplore.ieee.org/document/8880654"

    doc = _get_doc(is_aff_model)
    if additional_formats is None:
        additional_formats = []

    export_biomageio_model(
        checkpoint,
        output,
        input_data=input_data,
        name=name,
        authors=[{
            "name": "Constantin Pape; @constantinpape"
        }],
        tags=tags,
        license="CC-BY-4.0",
        documentation=doc,
        git_repo="https://github.com/constantinpape/torch-em.git",
        cite=cite,
        model_postprocessing=postprocessing,
        input_optional_parameters=False,
        # need custom deepimagej fields if we have torchscript export
        for_deepimagej="torchscript" in additional_formats,
        links=[get_bioimageio_dataset_id("monuseg")])
    add_weight_formats(output, additional_formats)
# --- Example 3 ---
def export_model():
    """Export the example model whose output shape differs from its input."""
    import imageio
    import h5py
    from torch_em.util import export_biomageio_model, get_default_citations
    from bioimageio.spec.shared import yaml

    # Example input: a crop of the serum channel, also used as cover image.
    with h5py.File("./data/gt_image_000.h5", "r") as f:
        raw = f["raw/serum_IgG/s0"][:256, :256]
    imageio.imwrite("./cover.jpg", raw)

    citations = get_default_citations(model="UNet2d")

    export_biomageio_model(
        "./checkpoints/diff-output-shape",
        "./exported",
        input_data=raw,
        authors=[{"name": "Constantin Pape; @constantinpape"}],
        tags=["segmentation"],
        license="CC-BY-4.0",
        documentation="Example Model: Different Output Shape",
        git_repo="https://github.com/constantinpape/torch-em.git",
        cite=citations,
        covers=["./cover.jpg"],
        input_optional_parameters=False
    )

    rdf_path = "./exported/rdf.yaml"
    with open(rdf_path, "r") as f:
        spec = yaml.load(f)

    # Patch the shape descriptions: dynamic input, output at half resolution.
    spec["inputs"][0]["shape"] = {"min": [1, 1, 32, 32], "step": [0, 0, 16, 16]}
    spec["outputs"][0]["shape"] = {
        "reference_input": "input",
        "offset": [0, 0, 0, 0],
        "scale": [1, 1, 0.5, 0.5],
    }

    # Point the spec at the custom network implementation and ship its source.
    spec["source"] = "./resize_unet.py:ResizeUNet"
    spec["kwargs"] = {"in_channels": 1, "out_channels": 1, "depth": 3, "initial_features": 16}
    copyfile("./resize_unet.py", "./exported/resize_unet.py")

    with open(rdf_path, "w") as f:
        yaml.dump(spec, f)
# --- Example 4 ---
def export_model():
    """Export the example model and pin its spec to a fixed tensor shape."""
    import h5py
    from torch_em.util import export_biomageio_model, get_default_citations
    from bioimageio.spec.shared import yaml

    # Example input: a crop of the serum channel.
    with h5py.File("./data/gt_image_000.h5", "r") as f:
        raw = f["raw/serum_IgG/s0"][:256, :256]

    export_biomageio_model(
        "./checkpoints/fixed-shape",
        "./exported",
        input_data=raw,
        authors=[{"name": "Constantin Pape; @constantinpape"}],
        tags=["segmentation"],
        license="CC-BY-4.0",
        documentation="Example Model: Fixed Shape",
        git_repo="https://github.com/constantinpape/torch-em.git",
        cite=get_default_citations(model="UNet2d"),
        input_optional_parameters=False)

    # Fixed shape with singleton batch and channel axes: (1, 1, H, W).
    fixed_shape = (1, 1) + raw.shape
    assert len(fixed_shape) == 4

    # Overwrite the dynamic shape entries in the exported spec.
    rdf_path = "./exported/rdf.yaml"
    with open(rdf_path, "r") as f:
        spec = yaml.load(f)
    spec["inputs"][0]["shape"] = fixed_shape
    spec["outputs"][0]["shape"] = fixed_shape
    with open(rdf_path, "w") as f:
        yaml.dump(spec, f)
# --- Example 5 ---
def export_model():
    """Export the two-channel example model and rewrite its spec into a
    multi-tensor description (two inputs, two outputs)."""
    import imageio
    import h5py
    from torch_em.util import add_weight_formats, export_biomageio_model, get_default_citations
    from bioimageio.spec.shared import yaml

    # Example inputs: serum and nuclei channel crops; the first doubles as cover.
    with h5py.File("./data/gt_image_000.h5", "r") as f:
        input_data = [
            f["raw/serum_IgG/s0"][:256, :256],
            f["raw/nuclei/s0"][:256, :256],
        ]
    imageio.imwrite("./cover.jpg", input_data[0])

    export_biomageio_model(
        "./checkpoints/multi-tensor",
        "./exported",
        input_data=input_data,
        authors=[{"name": "Constantin Pape; @constantinpape"}],
        tags=["segmentation"],
        license="CC-BY-4.0",
        documentation="Example Model: Different Output Shape",
        git_repo="https://github.com/constantinpape/torch-em.git",
        cite=get_default_citations(model="UNet2d"),
        covers=["./cover.jpg"],
        input_optional_parameters=False)
    add_weight_formats("./exported", ["onnx", "torchscript"])

    rdf_path = "./exported/rdf.yaml"
    with open(rdf_path, "r") as f:
        spec = yaml.load(f)

    # Rename the first input, make its shape dynamic, and duplicate it
    # as a second input.
    first_input = spec["inputs"][0]
    first_input["name"] = "input0"
    first_input["shape"] = {"min": [1, 1, 32, 32], "step": [0, 0, 16, 16]}

    second_input = deepcopy(first_input)
    second_input["name"] = "input1"
    spec["inputs"].append(second_input)

    # Same for the outputs; each output references its matching input.
    first_output = spec["outputs"][0]
    first_output["name"] = "output0"
    first_output["shape"] = {
        "reference_input": "input0",
        "offset": [0, 0, 0, 0],
        "scale": [1, 1, 1, 1]
    }

    second_output = deepcopy(first_output)
    second_output["name"] = "output1"
    second_output["shape"]["reference_input"] = "input1"
    spec["outputs"].append(second_output)

    # Point the spec at the custom network implementation and ship its source.
    spec["source"] = "./multi_tensor_unet.py:MultiTensorUNet"
    spec["kwargs"] = {"in_channels": 2, "out_channels": 2, "depth": 3, "initial_features": 16}
    copyfile("./multi_tensor_unet.py", "./exported/multi_tensor_unet.py")

    with open(rdf_path, "w") as f:
        yaml.dump(spec, f)
def export_to_bioimageio(checkpoint, output, input_, affs_to_bd,
                         additional_formats):
    """Export a trained plantseg (ovules / roots) checkpoint to the
    bioimage.io model format.

    Args:
        checkpoint: path to the trained checkpoint; the path encodes the
            specimen ("ovules" or "roots"), the basename encodes the output
            type ("affinity") and the dimensionality (trailing "2d").
        output: output folder for the exported model.
        input_: path to the example input data (may be None).
        affs_to_bd: whether to add a postprocessing step that converts
            predicted affinities to boundaries.
        additional_formats: additional weight formats to export (may be None).
    """
    root, ckpt_name = os.path.split(checkpoint)
    # NOTE(review): this takes the *head* of the parent path as the specimen;
    # the runtime assert below catches a wrong layout, but verify the intended
    # checkpoint directory structure.
    specimen = os.path.split(root)[0]
    assert specimen in ("ovules", "roots"), specimen

    is2d = checkpoint.endswith('2d')
    ndim = 2 if is2d else 3
    if input_ is None:
        input_data = None
    else:
        input_data = _load_data(input_, is2d)

    # If affinities are converted to boundaries, the exported model
    # effectively becomes a boundary model.
    is_aff_model = "affinity" in ckpt_name
    if is_aff_model and affs_to_bd:
        postprocessing = "affinities_to_boundaries3d"
        is_aff_model = False
    else:
        postprocessing = None

    name = _get_name(is_aff_model, specimen, ndim)
    tags = [
        "u-net", f"{specimen}-segmentation", "segmentation",
        "light-microscopy", "arabidopsis"
    ]
    if specimen == "ovules":
        tags += ["ovules", "confocal-microscopy"]
    else:
        tags += ["primordial-root", "light-sheet-microscopy"]
    # FIX: the prediction-type tags were swapped; an affinity model must be
    # tagged "affinity-prediction" (consistent with the citation logic below,
    # which selects model_output="affinities" for an affinity model).
    tags += ["affinity-prediction"] if is_aff_model else ["boundary-prediction"]

    # eventually we should refactor the citation logic
    plantseg_pub = "https://doi.org/10.7554/eLife.57613.sa2"
    cite = get_default_citations(
        model="UNet2d" if is2d else "UNet3d",
        model_output="affinities" if is_aff_model else "boundaries")
    cite["data"] = plantseg_pub
    cite["segmentation algorithm"] = plantseg_pub

    doc = _get_doc(is_aff_model, specimen, ndim)

    # FIX: guard against additional_formats=None, consistent with the sibling
    # export functions; `"torchscript" in None` would raise a TypeError.
    if additional_formats is None:
        additional_formats = []

    export_biomageio_model(
        checkpoint,
        output,
        input_data=input_data,
        name=name,
        authors=[{
            "name": "Constantin Pape; @constantinpape"
        }],
        tags=tags,
        license='CC-BY-4.0',
        documentation=doc,
        git_repo='https://github.com/constantinpape/torch-em.git',
        cite=cite,
        model_postprocessing=postprocessing,
        input_optional_parameters=False,
        # need custom deepimagej fields if we have torchscript export
        for_deepimagej="torchscript" in additional_formats,
        links=[get_bioimageio_dataset_id("ovules")])
    add_weight_formats(output, additional_formats)