Example #1
def load_module(module_name: str,
                defines: Optional[dict] = None,
                verbose_build: bool = False,
                build_timeout: int = 300):
    """
    Handles the loading of C++ extension modules.

    Args:
        module_name: Name of the module to load.
            Must match the name of the relevant source directory in the `_extensions` directory.
        defines: Dictionary containing names and values of compilation defines.
        verbose_build: Set to True to enable build logging.
        build_timeout: Time in seconds before the build will throw an exception to prevent hanging.
    """

    # Ensuring named module exists in _extensions directory.
    module_dir = path.join(dir_path, module_name)
    if not path.exists(module_dir):
        raise ValueError(f"No extension module named {module_name}")

    platform_str = f"_{platform.system()}_{platform.python_version()}_"
    platform_str += "".join(f"{v}" for v in get_torch_version_tuple()[:2])
    # Adding configuration to module name.
    if defines is not None:
        module_name = "_".join([module_name] +
                               [f"{v}" for v in defines.values()])

    # Gathering source files.
    source = glob(path.join(module_dir, "**", "*.cpp"), recursive=True)
    if torch.cuda.is_available():
        source += glob(path.join(module_dir, "**", "*.cu"), recursive=True)
        platform_str += f"_{torch.version.cuda}"

    # Constructing compilation argument list.
    define_args = [] if not defines else [
        f"-D {key}={defines[key]}" for key in defines
    ]

    # Ninja may be blocked by something out of our control.
    # This will error if the build takes longer than expected.
    with timeout(
            build_timeout,
            "Build appears to be blocked. Is there a stopped process building the same extension?"
    ):
        load, _ = optional_import(
            "torch.utils.cpp_extension",
            name="load")  # main trigger some JIT config in pytorch
        # This will either run the build or return the existing .so object.
        name = module_name + platform_str.replace(".", "_")
        module = load(
            name=name,
            sources=source,
            extra_cflags=define_args,
            extra_cuda_cflags=define_args,
            verbose=verbose_build,
        )

    return module
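A minimal usage sketch for the loader above; the extension name and compile define below are hypothetical and must match an actual source folder under `_extensions`:

# hypothetical extension folder and define, for illustration only
my_ext = load_module("lltm", defines={"BLOCK_SIZE": 128}, verbose_build=True)
# the returned module exposes whatever functions the C++/CUDA sources bind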
Example #2
def _get_all_ngc_models(pattern, page_index=0, page_size=50):
    url = "https://api.ngc.nvidia.com/v2/search/catalog/resources/MODEL"
    query_dict = {
        "query": "",
        "orderBy": [{"field": "score", "value": "DESC"}],
        "queryFields": ["all", "description", "displayName", "name", "resourceId"],
        "fields": [
            "isPublic",
            "attributes",
            "guestAccess",
            "name",
            "orgName",
            "teamName",
            "displayName",
            "dateModified",
            "labels",
            "description",
        ],
        "page": 0,
    }

    filters = [dict(field="name", value=f"*{pattern}*")]
    query_dict["page"] = page_index
    query_dict["pageSize"] = page_size
    query_dict["filters"] = filters
    query_str = json.dumps(query_dict)
    full_url = f"{url}?q={query_str}"
    requests_get, has_requests = optional_import("requests", name="get")
    if has_requests:
        resp = requests_get(full_url)
        resp.raise_for_status()
    else:
        raise ValueError(
            "NGC API requires the requests package. Please install it.")
    model_list = json.loads(resp.text)
    model_dict = {}
    for result in model_list["results"]:
        for model in result["resources"]:
            current_res_id = model["resourceId"]
            model_dict[current_res_id] = {"name": model["name"]}
            for attribute in model["attributes"]:
                if attribute["key"] == "latestVersionIdStr":
                    model_dict[current_res_id]["latest"] = attribute["value"]
    return model_dict
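A rough usage sketch of the helper above; the search pattern is illustrative, and the returned mapping follows the shape built in the loop (resource id to name and latest version):

# hypothetical pattern; requires network access and the `requests` package
ngc_models = _get_all_ngc_models("prostate_mri_segmentation")
for res_id, info in ngc_models.items():
    print(res_id, info["name"], info.get("latest"))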
Example #3
def download_mmar(item, mmar_dir=None, progress: bool = True):
    """
    Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.

    See Also:
        - https://docs.nvidia.com/clara/
        - Nvidia NGC Registry CLI
        - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html

    Args:
        item: the corresponding model item from `MODEL_DESC`.
        mmar_dir: target directory to store the MMAR, default is the `mmars` subfolder under `torch.hub.get_dir()`.
        progress: whether to display a progress bar.

    Examples::
        >>> from monai.apps import download_mmar
        >>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")

    Returns:
        The local directory of the downloaded model.
    """
    if not isinstance(item, Mapping):
        item = _get_model_spec(item)
    if not mmar_dir:
        get_dir, has_home = optional_import("torch.hub", name="get_dir")
        if has_home:
            mmar_dir = os.path.join(get_dir(), "mmars")
        else:
            raise ValueError(
                "mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?"
            )

    model_dir = os.path.join(mmar_dir, item[Keys.ID])
    download_and_extract(
        url=item[Keys.URL],
        filepath=os.path.join(mmar_dir,
                              f"{item[Keys.ID]}.{item[Keys.FILE_TYPE]}"),
        output_dir=model_dir,
        hash_val=item[Keys.HASH_VAL],
        hash_type=item[Keys.HASH_TYPE],
        file_type=item[Keys.FILE_TYPE],
        has_base=False,
        progress=progress,
    )
    return model_dir
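A small follow-up sketch, assuming the conventional `models/model.pt` layout inside the extracted MMAR (the relative path is an assumption, not something this function guarantees):

model_dir = download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")
# assumed archive layout; the actual file name depends on the MMAR contents
state = torch.load(os.path.join(model_dir, "models", "model.pt"), map_location="cpu")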
Example #4
from monai.data import DataLoader, Dataset, create_test_image_2d
from monai.data.image_reader import PILReader
from monai.transforms import LoadImage, LoadImaged
from monai.transforms.io.array import switch_endianness
from monai.utils.enums import PostFix
from monai.utils.module import optional_import

if TYPE_CHECKING:
    import nibabel as nib
    from PIL import Image as PILImage

    has_nib = True
    has_pil = True
else:
    nib, has_nib = optional_import("nibabel")
    PILImage, has_pil = optional_import("PIL.Image")

TESTS: List[Tuple] = []
for endianness in ["<", ">"]:
    for use_array in [True, False]:
        for image_only in [True, False]:
            TESTS.append((endianness, use_array, image_only))


class TestNiftiEndianness(unittest.TestCase):
    def setUp(self):
        self.im, _ = create_test_image_2d(100, 100)
        self.fname = tempfile.NamedTemporaryFile(suffix=".nii.gz").name

    @parameterized.expand(TESTS)
Example #5
from monai.data.dataset import Dataset
from monai.data.utils import list_data_collate, pad_list_data_collate
from monai.transforms.compose import Compose
from monai.transforms.inverse import InvertibleTransform
from monai.transforms.inverse_batch_transform import BatchInverseTransform
from monai.transforms.transform import Randomizable
from monai.transforms.utils import allow_missing_keys_mode, convert_inverse_interp_mode
from monai.utils.enums import CommonKeys, InverseKeys
from monai.utils.module import optional_import

if TYPE_CHECKING:
    from tqdm import tqdm

    has_tqdm = True
else:
    tqdm, has_tqdm = optional_import("tqdm", name="tqdm")

__all__ = ["TestTimeAugmentation"]


class TestTimeAugmentation:
    """
    Class for performing test time augmentations. This will pass the same image through the network multiple times.

    The user passes transform(s) to be applied to each realisation, and provided that at least one of those transforms
    is random, the network's output will vary. Provided that inverse transformations exist for all supplied spatial
    transforms, the inverse can be applied to each realisation of the network's output. Once in the same spatial
    reference, the results can then be combined and metrics computed.

    Test time augmentations are a useful feature for computing network uncertainty, as well as observing the network's
    dependency on the applied random transforms.
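A hedged usage sketch of the class described above; the constructor and call arguments shown are illustrative and may not match every MONAI version exactly:

# `transforms` must contain at least one invertible random spatial transform (illustrative names)
tt_aug = TestTimeAugmentation(transforms, batch_size=4, num_workers=0,
                              inferrer_fn=lambda x: model(x), device="cpu")
mode, mean, std, vvc = tt_aug({"image": image, "label": label}, num_examples=8)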
Example #6
def load_from_mmar(
    item,
    mmar_dir=None,
    progress: bool = True,
    map_location=None,
    pretrained=True,
    weights_only=False,
    model_key: str = "model",
):
    """
    Download and extract Medical Model Archive (MMAR) model weights from Nvidia Clara Train.

    Args:
        item: the corresponding model item from `MODEL_DESC`.
        mmar_dir: target directory to store the MMAR, default is the `mmars` subfolder under `torch.hub.get_dir()`.
        progress: whether to display a progress bar when downloading the content.
        map_location: pytorch API parameter for `torch.load` or `torch.jit.load`.
        pretrained: whether to load the pretrained weights after initializing a network module.
        weights_only: whether to load only the weights instead of initializing the network module and assign weights.
        model_key: a key to search in the model file or config file for the model dictionary.
            Currently this function assumes that the model dictionary has
            `{"[name|path]": "test.module", "args": {'kw': 'test'}}`.

    Examples::
        >>> from monai.apps import load_from_mmar
        >>> unet_model = load_from_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".", map_location="cpu")
        >>> print(unet_model)

    See Also:
        https://docs.nvidia.com/clara/
    """
    if not isinstance(item, Mapping):
        item = _get_model_spec(item)
    model_dir = download_mmar(item=item, mmar_dir=mmar_dir, progress=progress)
    model_file = os.path.join(model_dir, item[Keys.MODEL_FILE])
    print(f'\n*** "{item[Keys.ID]}" available at {model_dir}.')

    # loading with `torch.jit.load`
    if f"{model_file}".endswith(".ts"):
        if not pretrained:
            warnings.warn(
                "Loading a ScriptModule, 'pretrained' option ignored.")
        if weights_only:
            warnings.warn(
                "Loading a ScriptModule, 'weights_only' option ignored.")
        return torch.jit.load(model_file, map_location=map_location)

    # loading with `torch.load`
    model_dict = torch.load(model_file, map_location=map_location)
    if weights_only:
        return model_dict.get(
            model_key,
            model_dict)  # model_dict[model_key] or model_dict directly

    # 1. search `model_dict['train_conf']` for the model config spec.
    model_config = _get_val(dict(model_dict).get("train_conf", {}),
                            key=model_key,
                            default={})
    if not model_config:
        # 2. search json CONFIG_FILE for model config spec.
        json_path = os.path.join(
            model_dir, item.get(Keys.CONFIG_FILE, "config_train.json"))
        with open(json_path) as f:
            conf_dict = json.load(f)
        conf_dict = dict(conf_dict)
        model_config = _get_val(conf_dict, key=model_key, default={})
    if not model_config:
        # 3. search `model_dict` for model config spec.
        model_config = _get_val(dict(model_dict), key=model_key, default={})

    if not (model_config and isinstance(model_config, Mapping)):
        raise ValueError(
            f"Could not load model config dictionary from config: {item.get(Keys.CONFIG_FILE)}, "
            f"or from model file: {item.get(Keys.MODEL_FILE)}.")

    # parse `model_config` for model class and model parameters
    if model_config.get("name"):  # model config section is a "name"
        model_name = model_config["name"]
        model_cls = monai_nets.__dict__[model_name]
    elif model_config.get("path"):  # model config section is a "path"
        # https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html
        model_module, model_name = model_config.get("path", ".").rsplit(".", 1)
        model_cls, has_cls = optional_import(module=model_module,
                                             name=model_name)
        if not has_cls:
            raise ValueError(
                f"Could not load MMAR model config {model_config.get('path', '')}, "
                f"Please make sure MMAR's sub-folders in '{model_dir}' is on the PYTHONPATH."
                "See also: https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html"
            )
    else:
        raise ValueError(f"Could not load model config {model_config}.")

    print(f"*** Model: {model_cls}")
    model_kwargs = model_config.get("args", None)
    if model_kwargs:
        model_inst = model_cls(**model_kwargs)
        print(f"*** Model params: {model_kwargs}")
    else:
        model_inst = model_cls()
    if pretrained:
        model_inst.load_state_dict(model_dict.get(model_key, model_dict))
    print("\n---")
    print(f"For more information, please visit {item[Keys.DOC]}\n")
    return model_inst
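For reference, a sketch of the two model-config shapes that the parsing logic above accepts; the class names and arguments are placeholders:

# "name" form: the class is looked up in monai.networks.nets
config_by_name = {"name": "UNet", "args": {"spatial_dims": 3, "in_channels": 1, "out_channels": 2,
                                           "channels": (16, 32, 64, 128), "strides": (2, 2, 2)}}
# "path" form: the class is imported from a custom module that must be on the PYTHONPATH
config_by_path = {"path": "my_package.my_net.MyNet", "args": {"num_classes": 2}}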
Example #7
import torch

from monai.config import IgniteInfo
from monai.utils.module import min_version, optional_import

try:
    import matplotlib.pyplot as plt

    has_matplotlib = True
except ImportError:
    has_matplotlib = False

if TYPE_CHECKING:
    from ignite.engine import Engine, Events
else:
    Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION,
                                min_version, "Engine")
    Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION,
                                min_version, "Events")

LOSS_NAME = "loss"


def plot_metric_graph(
        ax,
        title: str,
        graphmap: Dict[str, Union[List[float], Tuple[List[float],
                                                     List[float]]]],
        yscale: str = "log",
        avg_keys: Tuple[str] = (LOSS_NAME, ),
        window_fraction: int = 20,
):
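The function body is cut off in this listing; a rough usage sketch based on the visible signature (argument semantics are inferred, not confirmed):

fig, ax = plt.subplots()
# `graphmap` maps curve names either to y-values or to (x-values, y-values) pairs
plot_metric_graph(ax, "Training", {"loss": [0.9, 0.7, 0.5], "val_mean_dice": [0.4, 0.55, 0.6]})
fig.savefig("metrics.png")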
Example #8
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional

import numpy as np

from monai.config.type_definitions import NdarrayOrTensor
from monai.transforms.croppad.array import SpatialPad
from monai.transforms.utils import rescale_array
from monai.transforms.utils_pytorch_numpy_unification import repeat, where
from monai.utils.module import optional_import
from monai.utils.type_conversion import convert_data_type, convert_to_dst_type

plt, _ = optional_import("matplotlib", name="pyplot")
cm, _ = optional_import("matplotlib", name="cm")

__all__ = ["matshow3d", "blend_images"]


def matshow3d(
    volume,
    fig=None,
    title: Optional[str] = None,
    figsize=(10, 10),
    frames_per_row: Optional[int] = None,
    frame_dim: int = -3,
    channel_dim: Optional[int] = None,
    vmin=None,
    vmax=None,
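The signature above is truncated; a minimal usage sketch with an assumed frame layout:

volume = np.random.rand(32, 64, 64)  # 32 frames stacked along the default frame_dim=-3
fig = plt.figure()
matshow3d(volume, fig=fig, title="volume preview", frames_per_row=8)
plt.show()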
Example #9
def download_mmar(item,
                  mmar_dir=None,
                  progress: bool = True,
                  api: bool = False,
                  version: int = -1):
    """
    Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.

    See Also:
        - https://docs.nvidia.com/clara/
        - Nvidia NGC Registry CLI
        - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html

    Args:
        item: the corresponding model item from `MODEL_DESC`.
          Or when api is True, the substring to query NGC's model name field.
        mmar_dir: target directory to store the MMAR, default is the `mmars` subfolder under `torch.hub.get_dir()`.
        progress: whether to display a progress bar.
        api: whether to query NGC and download via the API.
        version: which version of the MMAR to download. `-1` means the latest from NGC.

    Examples::
        >>> from monai.apps import download_mmar
        >>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")
        >>> download_mmar("prostate_mri_segmentation", mmar_dir=".", api=True)


    Returns:
        The local directory of the downloaded model.
        If api is True, a list of local directories of downloaded models.
    """
    if not mmar_dir:
        get_dir, has_home = optional_import("torch.hub", name="get_dir")
        if has_home:
            mmar_dir = os.path.join(get_dir(), "mmars")
        else:
            raise ValueError(
                "mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?"
            )

    if api:
        model_dict = _get_all_ngc_models(item)
        if len(model_dict) == 0:
            raise ValueError(
                f"API query returned no item for pattern {item}. Please change or shorten it."
            )
        model_dir_list = []
        for k, v in model_dict.items():
            ver = v["latest"] if version == -1 else str(version)
            download_url = _get_ngc_url(k, ver)
            model_dir = os.path.join(mmar_dir, v["name"])
            download_and_extract(
                url=download_url,
                filepath=os.path.join(mmar_dir, f'{v["name"]}_{ver}.zip'),
                output_dir=model_dir,
                hash_val=None,
                hash_type="md5",
                file_type="zip",
                has_base=False,
                progress=progress,
            )
            model_dir_list.append(model_dir)
        return model_dir_list

    if not isinstance(item, Mapping):
        item = get_model_spec(item)

    ver = item.get(Keys.VERSION, 1)
    if version > 0:
        ver = str(version)
    model_fullname = f"{item[Keys.NAME]}_{ver}"
    model_dir = os.path.join(mmar_dir, model_fullname)
    model_url = item.get(Keys.URL) or _get_ngc_url(
        item[Keys.NAME], version=ver, model_prefix="nvidia/med/")
    download_and_extract(
        url=model_url,
        filepath=os.path.join(mmar_dir,
                              f"{model_fullname}.{item[Keys.FILE_TYPE]}"),
        output_dir=model_dir,
        hash_val=item[Keys.HASH_VAL],
        hash_type=item[Keys.HASH_TYPE],
        file_type=item[Keys.FILE_TYPE],
        has_base=False,
        progress=progress,
    )
    return model_dir
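A short sketch of the `api=True` branch described above, which returns a list of extracted directories rather than a single path (the query string is illustrative):

mmar_dirs = download_mmar("prostate_mri_segmentation", mmar_dir=".", api=True)
for d in mmar_dirs:
    print("extracted MMAR at:", d)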
Example #10
from unittest.case import skipUnless

import numpy as np
from parameterized import parameterized

from monai.data import DataLoader, Dataset, create_test_image_2d
from monai.transforms import LoadImage, LoadImaged
from monai.transforms.io.array import switch_endianness
from monai.utils.module import optional_import

if TYPE_CHECKING:
    import nibabel as nib

    has_nib = True
else:
    nib, has_nib = optional_import("nibabel")

TESTS: List[Tuple] = []
for endianness in ["<", ">"]:
    for use_array in [True, False]:
        for image_only in [True, False]:
            TESTS.append((endianness, use_array, image_only))


class TestNiftiEndianness(unittest.TestCase):
    def setUp(self):
        self.im, _ = create_test_image_2d(100, 100)
        self.fname = tempfile.NamedTemporaryFile(suffix=".nii.gz").name

    @parameterized.expand(TESTS)
    @skipUnless(has_nib, "Requires NiBabel")
Example #11
def load_from_mmar(
    item,
    mmar_dir: Optional[PathLike] = None,
    progress: bool = True,
    version: int = -1,
    map_location=None,
    pretrained=True,
    weights_only=False,
    model_key: str = "model",
    api: bool = True,
    model_file=None,
):
    """
    Download and extract Medical Model Archive (MMAR) model weights from Nvidia Clara Train.

    Args:
        item: the corresponding model item from `MODEL_DESC`.
        mmar_dir: target directory to store the MMAR, default is the `mmars` subfolder under `torch.hub.get_dir()`.
        progress: whether to display a progress bar when downloading the content.
        version: version number of the MMAR. Set it to `-1` to use `item[Keys.VERSION]`.
        map_location: pytorch API parameter for `torch.load` or `torch.jit.load`.
        pretrained: whether to load the pretrained weights after initializing a network module.
        weights_only: whether to load only the weights instead of initializing the network module and assign weights.
        model_key: a key to search in the model file or config file for the model dictionary.
            Currently this function assumes that the model dictionary has
            `{"[name|path]": "test.module", "args": {'kw': 'test'}}`.
        api: whether to query the NGC API to get model information.
        model_file: the relative path to the model file within an MMAR.

    Examples::
        >>> from monai.apps import load_from_mmar
        >>> unet_model = load_from_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".", map_location="cpu")
        >>> print(unet_model)

    See Also:
        https://docs.nvidia.com/clara/
    """
    if api:
        item = {
            Keys.NAME:
            get_model_spec(item)[Keys.NAME]
            if isinstance(item, int) else f"{item}"
        }
    if not isinstance(item, Mapping):
        item = get_model_spec(item)
    model_dir = download_mmar(item=item,
                              mmar_dir=mmar_dir,
                              progress=progress,
                              version=version,
                              api=api)
    if model_file is None:
        model_file = os.path.join("models", "model.pt")
    model_file = model_dir / item.get(Keys.MODEL_FILE, model_file)
    logger.info(f'\n*** "{item.get(Keys.NAME)}" available at {model_dir}.')

    # loading with `torch.jit.load`
    if model_file.name.endswith(".ts"):
        if not pretrained:
            warnings.warn(
                "Loading a ScriptModule, 'pretrained' option ignored.")
        if weights_only:
            warnings.warn(
                "Loading a ScriptModule, 'weights_only' option ignored.")
        return torch.jit.load(model_file, map_location=map_location)

    # loading with `torch.load`
    model_dict = torch.load(model_file, map_location=map_location)
    if weights_only:
        return model_dict.get(
            model_key,
            model_dict)  # model_dict[model_key] or model_dict directly

    # 1. search `model_dict['train_conf']` for the model config spec.
    model_config = _get_val(dict(model_dict).get("train_conf", {}),
                            key=model_key,
                            default={})
    if not model_config or not isinstance(model_config, Mapping):
        # 2. search json CONFIG_FILE for model config spec.
        json_path = model_dir / item.get(
            Keys.CONFIG_FILE, os.path.join("config", "config_train.json"))
        with open(json_path) as f:
            conf_dict = json.load(f)
        conf_dict = dict(conf_dict)
        model_config = _get_val(conf_dict, key=model_key, default={})
    if not model_config:
        # 3. search `model_dict` for model config spec.
        model_config = _get_val(dict(model_dict), key=model_key, default={})

    if not (model_config and isinstance(model_config, Mapping)):
        raise ValueError(
            f"Could not load model config dictionary from config: {item.get(Keys.CONFIG_FILE)}, "
            f"or from model file: {item.get(Keys.MODEL_FILE)}.")

    # parse `model_config` for model class and model parameters
    if model_config.get("name"):  # model config section is a "name"
        model_name = model_config["name"]
        model_cls = monai_nets.__dict__[model_name]
    elif model_config.get("path"):  # model config section is a "path"
        # https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html
        model_module, model_name = model_config.get("path", ".").rsplit(".", 1)
        model_cls, has_cls = optional_import(module=model_module,
                                             name=model_name)
        if not has_cls:
            raise ValueError(
                f"Could not load MMAR model config {model_config.get('path', '')}, "
                f"Please make sure MMAR's sub-folders in '{model_dir}' is on the PYTHONPATH."
                "See also: https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html"
            )
    else:
        raise ValueError(f"Could not load model config {model_config}.")

    logger.info(f"*** Model: {model_cls}")
    model_kwargs = model_config.get("args", None)
    if model_kwargs:
        model_inst = model_cls(**model_kwargs)
        logger.info(f"*** Model params: {model_kwargs}")
    else:
        model_inst = model_cls()
    if pretrained:
        _, changed, unchanged = copy_model_state(model_inst,
                                                 model_dict.get(
                                                     model_key, model_dict),
                                                 inplace=True)
        if not (changed
                and not unchanged):  # not all model_inst variables are changed
            logger.warning(
                f"*** Loading model state -- unchanged: {len(unchanged)}, changed: {len(changed)}."
            )
    logger.info("\n---")
    doc_url = item.get(Keys.DOC) or _get_ngc_doc_url(
        item[Keys.NAME], model_prefix="nvidia:med:")
    logger.info(f"For more information, please visit {doc_url}\n")
    return model_inst
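A brief sketch of the `weights_only` path in the function above, which returns the raw state dict instead of an instantiated network (model name and keys are placeholders):

state = load_from_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".",
                       map_location="cpu", weights_only=True)
print(list(state.keys())[:5])  # the first few parameter names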
Example #12
    RandAxisFlipd,
    RandGridDistortiond,
    RandRotate90d,
    Resized,
    Spacingd,
)
from monai.utils.enums import CommonKeys
from monai.utils.module import optional_import

if TYPE_CHECKING:
    import matplotlib.pyplot as plt

    has_matplotlib = True

else:
    plt, has_matplotlib = optional_import("matplotlib.pyplot")


def get_data(keys):
    """Get the example data to be used.

    Use MarsAtlas as it only contains 1 image for quick download and
    that image is parcellated.
    """
    cache_dir = os.environ.get("MONAI_DATA_DIRECTORY") or tempfile.mkdtemp()
    fname = "MarsAtlas-MNI-Colin27.zip"
    url = "https://www.dropbox.com/s/ndz8qtqblkciole/" + fname + "?dl=1"
    out_path = os.path.join(cache_dir, "MarsAtlas-MNI-Colin27")
    zip_path = os.path.join(cache_dir, fname)

    download_and_extract(url, zip_path, out_path)
Example #13
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict, Optional, Union, cast

import torch
import torch.nn as nn

from monai.utils.module import optional_import

models, _ = optional_import("torchvision.models")


class MILModel(nn.Module):
    """
    Multiple Instance Learning (MIL) model, with a backbone classification model.
    Currently, it only works for 2D images; a typical use case is classification of
    digital pathology whole slide images. The expected shape of input data is `[B, N, C, H, W]`,
    where `B` is the batch_size of PyTorch Dataloader and `N` is the number of instances
    extracted from every original image in the batch. A tutorial example is available at:
    https://github.com/Project-MONAI/tutorials/tree/master/pathology/multiple_instance_learning.

    Args:
        num_classes: number of output classes.
        mil_mode: MIL algorithm, available values (Defaults to ``"att"``):
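A hedged sketch of the expected input described in the docstring, with B=2 bags of N=16 instances each; constructing the model may download a backbone, and the defaults assumed here may differ by version:

model = MILModel(num_classes=2, mil_mode="att")
x = torch.rand(2, 16, 3, 224, 224)  # [B, N, C, H, W]
logits = model(x)                   # expected shape [2, 2]: one score per class per bag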
Example #14
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from monai.utils.module import optional_import

_C, _ = optional_import("monai._C")

__all__ = ["BilateralFilter", "PHLFilter"]


class BilateralFilter(torch.autograd.Function):
    """
    Blurs the input tensor spatially whilst preserving edges. Can run on 1D, 2D, or 3D
    tensors (on top of batch and channel dimensions). Two implementations are provided:
    an exact solution and a much faster approximation which uses a permutohedral lattice.

    See:
        https://en.wikipedia.org/wiki/Bilateral_filter
        https://graphics.stanford.edu/papers/permutohedral/

    Args:
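The argument list is cut off above; a rough usage sketch of the autograd Function, where the parameter order (spatial sigma, color sigma, fast-approximation flag) is an assumption based on the description:

x = torch.rand(1, 1, 64, 64)  # [B, C, H, W]
# the sigmas control blur extent; the final flag selects the permutohedral approximation
y = BilateralFilter.apply(x, 5.0, 0.5, True)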
Example #15
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from functools import partial
from typing import Callable

import torch

from monai.networks.utils import replace_modules_temp
from monai.utils.module import optional_import
from monai.visualize.class_activation_maps import ModelWithHooks

trange, has_trange = optional_import("tqdm", name="trange")


__all__ = ["VanillaGrad", "SmoothGrad", "GuidedBackpropGrad", "GuidedBackpropSmoothGrad"]


class _AutoGradReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        pos_mask = (x > 0).type_as(x)
        output = torch.mul(x, pos_mask)
        ctx.save_for_backward(x, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
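        # The listing cuts off here; a hedged sketch of the standard guided-backprop rule,
        # passing gradient only where both the input and the incoming gradient are positive.
        x, _ = ctx.saved_tensors
        positive_input = (x > 0).type_as(grad_output)
        positive_grad = (grad_output > 0).type_as(grad_output)
        return grad_output * positive_input * positive_grad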
Example #16
# limitations under the License.

import unittest
from copy import deepcopy

import numpy as np
import torch
from parameterized import parameterized

from monai.data.synthetic import create_test_image_2d, create_test_image_3d
from monai.transforms import RandGibbsNoised
from monai.utils.misc import set_determinism
from monai.utils.module import optional_import
from tests.utils import TEST_NDARRAYS

_, has_torch_fft = optional_import("torch.fft", name="fftshift")

TEST_CASES = []
for shape in ((128, 64), (64, 48, 80)):
    for input_type in TEST_NDARRAYS if has_torch_fft else [np.array]:
        TEST_CASES.append((shape, input_type))

KEYS = ["im", "label"]


class TestRandGibbsNoised(unittest.TestCase):
    def setUp(self):
        set_determinism(0)
        super().setUp()

    def tearDown(self):
Example #17
File: dist.py Project: Nic-Ma/MONAI
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List

import torch
import torch.distributed as dist

from monai.config import IgniteInfo
from monai.utils.module import min_version, optional_import

idist, has_ignite = optional_import("ignite", IgniteInfo.OPT_IMPORT_VERSION, min_version, "distributed")

__all__ = ["get_dist_device", "evenly_divisible_all_gather", "string_list_all_gather"]


def get_dist_device():
    """
    Get the expected target device in the native PyTorch distributed data parallel.
    For NCCL backend, return GPU device of current process.
    For GLOO backend, return CPU.
    For any other backend, return None as the default; `tensor.to(None)` will not change the device.

    """
    if dist.is_initialized():
        backend = dist.get_backend()
        if backend == "nccl" and torch.cuda.is_available():
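The function is truncated above; a short usage sketch of the intended pattern, moving a tensor to the backend-appropriate device before a collective call (only meaningful inside an initialized process group):

device = get_dist_device()
data = torch.tensor([1, 2, 3]).to(device)  # `.to(None)` is a no-op when no backend matched
gathered = evenly_divisible_all_gather(data)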
Example #18
# limitations under the License.

import os
import platform
import re
import sys
from collections import OrderedDict

import numpy as np
import torch

import monai
from monai.utils.module import OptionalImportError, get_package_version, optional_import

try:
    _, HAS_EXT = optional_import("monai._C")
    USE_COMPILED = HAS_EXT and os.getenv("BUILD_MONAI", "0") == "1"
except (OptionalImportError, ImportError, AttributeError):
    HAS_EXT = USE_COMPILED = False

USE_META_DICT = os.environ.get(
    "USE_META_DICT",
    "0") == "1"  # set to True for compatibility, use meta dict.

psutil, has_psutil = optional_import("psutil")
psutil_version = psutil.__version__ if has_psutil else "NOT INSTALLED or UNKNOWN VERSION."

__all__ = [
    "print_config",
    "get_system_info",
    "print_system_info",
Example #19
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from monai.utils import evenly_divisible_all_gather
from monai.utils.module import optional_import

hvd, has_hvd = optional_import("horovod", name="torch")


class HvdEvenlyDivisibleAllGather:
    def test_data(self):
        # initialize Horovod
        hvd.init()
        if torch.cuda.is_available():
            torch.cuda.set_device(hvd.local_rank())
        self._run()

    def _run(self):
        if hvd.rank() == 0:
            data1 = torch.tensor([[1, 2], [3, 4]])
            data2 = torch.tensor([[1.0, 2.0]])
            data3 = torch.tensor(7)