Ejemplo n.º 1
0
    def __init__(
        self,
        root_dir: str,
        section: str,
        transform: Union[Sequence[Callable], Callable] = (),
        download: bool = False,
        seed: int = 0,
        val_frac: float = 0.1,
        test_frac: float = 0.1,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: int = 0,
    ) -> None:
        """
        Validate ``root_dir``, optionally download/extract the dataset
        archive, then build the cached dataset.

        Args:
            root_dir: local directory holding (or receiving) the dataset.
            section: dataset split selector, stored for ``_generate_data_list``.
            transform: transform(s) applied to each item; when omitted,
                defaults to ``LoadPNGd("image")`` (resolved lazily below).
            download: if True, fetch and extract the archive into ``root_dir``.
            seed: forwarded to ``set_random_state`` for deterministic splits.
            val_frac: validation split fraction kept on the instance.
            test_frac: test split fraction kept on the instance.
            cache_num: forwarded to the caching base class.
            cache_rate: forwarded to the caching base class.
            num_workers: forwarded to the caching base class.

        Raises:
            ValueError: if ``root_dir`` is not an existing directory.
            RuntimeError: if the extracted dataset folder is absent.
        """
        if not os.path.isdir(root_dir):
            raise ValueError("root_dir must be a directory.")
        self.section = section
        self.val_frac = val_frac
        self.test_frac = test_frac
        self.set_random_state(seed=seed)
        tarfile_name = os.path.join(root_dir, self.compressed_file_name)
        dataset_dir = os.path.join(root_dir, self.dataset_folder_name)
        if download:
            download_and_extract(self.resource, tarfile_name, root_dir,
                                 self.md5)

        if not os.path.exists(dataset_dir):
            raise RuntimeError(
                f"can not find dataset directory: {dataset_dir}, please use download=True to download it."
            )
        data = self._generate_data_list(dataset_dir)
        # Fix: build the default transform lazily instead of at function
        # definition time, so all instances don't share one transform object
        # (mutable-default pitfall); this mirrors the `()` sentinel pattern
        # used by the newer dataset constructors.
        if transform == ():
            transform = LoadPNGd("image")
        super().__init__(data,
                         transform,
                         cache_num=cache_num,
                         cache_rate=cache_rate,
                         num_workers=num_workers)
Ejemplo n.º 2
0
    def __init__(
        self,
        root_dir: str,
        task: str,
        section: str,
        transform: Union[Sequence[Callable], Callable] = (),
        download: bool = False,
        seed: int = 0,
        val_frac: float = 0.2,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: int = 0,
    ) -> None:
        """
        Validate arguments, optionally download the Decathlon task archive,
        and initialize the cached dataset.

        Args:
            root_dir: local directory holding (or receiving) the dataset.
            task: which Decathlon task to load; must be a key of ``self.resource``.
            section: dataset split selector, stored for ``_generate_data_list``.
            transform: transform(s) applied to each item; when omitted,
                defaults to ``LoadNiftid(["image", "label"])`` (resolved lazily).
            download: if True, fetch and extract the task archive.
            seed: forwarded to ``set_random_state`` for deterministic splits.
            val_frac: validation split fraction kept on the instance.
            cache_num: forwarded to the caching base class.
            cache_rate: forwarded to the caching base class.
            num_workers: forwarded to the caching base class.

        Raises:
            ValueError: if ``root_dir`` is not a directory or ``task`` is unknown.
            RuntimeError: if the extracted task folder is missing.
        """
        if not os.path.isdir(root_dir):
            raise ValueError("Root directory root_dir must be a directory.")
        self.section = section
        self.val_frac = val_frac
        self.set_random_state(seed=seed)
        if task not in self.resource:
            raise ValueError(f"Unsupported task: {task}, available options are: {list(self.resource.keys())}.")
        dataset_dir = os.path.join(root_dir, task)
        tarfile_name = f"{dataset_dir}.tar"
        if download:
            download_and_extract(self.resource[task], tarfile_name, root_dir, self.md5[task])

        if not os.path.exists(dataset_dir):
            raise RuntimeError(
                f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it."
            )
        data = self._generate_data_list(dataset_dir)
        # Fix: resolve the default transform lazily rather than constructing a
        # single shared LoadNiftid instance at import time (mutable-default
        # pitfall); mirrors the `()` sentinel used by the newer constructors.
        if transform == ():
            transform = LoadNiftid(["image", "label"])
        super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)
Ejemplo n.º 3
0
    def initialize(self, args):
        """
        One-time Triton model setup: fetch the MedNIST classifier archive,
        choose an inference device, and build the transform pipelines.
        """
        # Pull the archived model from Google Drive into the serving directory.
        model_root = "/models/mednist_class/1"
        archive_path = os.path.join(model_root, model_filename)
        download_and_extract(gdrive_url,
                             archive_path,
                             output_dir=model_root,
                             hash_val=md5_check,
                             hash_type="md5")

        # Parse the model configuration supplied by Triton.
        self.model_config = json.loads(args['model_config'])

        device_kind = args.get('model_instance_kind', None)
        logger.info(f"Inference device: {device_kind}")

        # Default to CPU; switch to CUDA only when a GPU instance is
        # requested and CUDA is actually available.
        self.inference_device = torch.device('cpu')
        if device_kind == 'GPU':
            device_id = args.get('model_instance_device_id', '0')
            logger.info(f"Inference device id: {device_id}")
            if torch.cuda.is_available():
                self.inference_device = torch.device(f'cuda:{device_id}')
                cudnn.enabled = True
            else:
                logger.error(
                    f"No CUDA device detected. Using device: {device_kind}"
                )

        # Pre-transforms for MedNIST: load the PNG, scale intensities, insert
        # two leading singleton dims, tensorize, move to the inference device.
        self.pre_transforms = Compose([
            LoadImage(reader="PILReader", image_only=True, dtype=np.float32),
            ScaleIntensity(),
            AddChannel(),
            AddChannel(),
            ToTensor(),
            Lambda(func=lambda t: t.to(device=self.inference_device)),
        ])

        # Post-transforms: bring predictions back onto the CPU.
        self.post_transforms = Compose([
            Lambda(func=lambda t: t.to(device="cpu")),
        ])

        self.inferer = SimpleInferer()

        # TorchScript model is expected next to this file as `model.pt`.
        self.model = torch.jit.load(
            f'{pathlib.Path(os.path.realpath(__file__)).parent}{os.path.sep}model.pt',
            map_location=self.inference_device)
Ejemplo n.º 4
0
    def __init__(
        self,
        root_dir: str,
        task: str,
        section: str,
        transform: Union[Sequence[Callable], Callable] = (),
        download: bool = False,
        seed: int = 0,
        val_frac: float = 0.2,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: int = 0,
    ) -> None:
        """
        Prepare a Decathlon task dataset: validate arguments, optionally
        download the archive, read the task properties, and build the cache.
        """
        if not os.path.isdir(root_dir):
            raise ValueError("Root directory root_dir must be a directory.")
        self.section = section
        self.val_frac = val_frac
        self.set_random_state(seed=seed)
        if task not in self.resource:
            raise ValueError(
                f"Unsupported task: {task}, available options are: {list(self.resource.keys())}."
            )
        task_dir = os.path.join(root_dir, task)
        if download:
            download_and_extract(self.resource[task], f"{task_dir}.tar",
                                 root_dir, self.md5[task])
        if not os.path.exists(task_dir):
            raise RuntimeError(
                f"Cannot find dataset directory: {task_dir}, please use download=True to download it."
            )
        self.indices: np.ndarray = np.array([])
        data = self._generate_data_list(task_dir)
        # `release` is intentionally omitted: its key is misspelt in the
        # Task04 config file.
        property_keys = [
            "name", "description", "reference", "licence", "tensorImageSize",
            "modality", "labels", "numTraining", "numTest",
        ]
        self._properties = load_decathlon_properties(
            os.path.join(task_dir, "dataset.json"), property_keys)
        # `()` is the "no transform supplied" sentinel.
        transform = LoadImaged(["image", "label"]) if transform == () else transform
        CacheDataset.__init__(self, data, transform, cache_num=cache_num,
                              cache_rate=cache_rate, num_workers=num_workers)
Ejemplo n.º 5
0
    def __init__(
        self,
        root_dir: PathLike,
        section: str,
        transform: Union[Sequence[Callable], Callable] = (),
        download: bool = False,
        seed: int = 0,
        val_frac: float = 0.1,
        test_frac: float = 0.1,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: Optional[int] = 1,
        progress: bool = True,
        copy_cache: bool = True,
        as_contiguous: bool = True,
    ) -> None:
        """
        Set up the dataset: verify (or download) the archive under
        ``root_dir`` and build the cached dataset from its contents.
        """
        root_dir = Path(root_dir)
        if not root_dir.is_dir():
            raise ValueError("Root directory root_dir must be a directory.")
        # Record split configuration and seed the deterministic splitter.
        self.section = section
        self.val_frac = val_frac
        self.test_frac = test_frac
        self.set_random_state(seed=seed)
        self.num_class = 0
        archive_path = root_dir / self.compressed_file_name
        dataset_dir = root_dir / self.dataset_folder_name
        if download:
            download_and_extract(
                url=self.resource,
                filepath=archive_path,
                output_dir=root_dir,
                hash_val=self.md5,
                hash_type="md5",
                progress=progress,
            )
        if not dataset_dir.is_dir():
            raise RuntimeError(
                f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it."
            )
        data = self._generate_data_list(dataset_dir)
        # `()` is the "no transform supplied" sentinel; fall back to loading.
        transform = LoadImaged("image") if transform == () else transform
        CacheDataset.__init__(
            self,
            data=data,
            transform=transform,
            cache_num=cache_num,
            cache_rate=cache_rate,
            num_workers=num_workers,
            progress=progress,
            copy_cache=copy_cache,
            as_contiguous=as_contiguous,
        )
Ejemplo n.º 6
0
def download_spleen_dataset(root_dir: str):
    """
    Fetch and unpack the Spleen dataset (Decathlon Task09) into ``root_dir``.

    To download other Decathlon datasets, see
    ``monai.apps.datasets.DecathlonDataset``.
    """
    task = "Task09_Spleen"
    archive_path = os.path.join(root_dir, task) + ".tar"
    download_and_extract(
        url="https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar",
        filepath=archive_path,
        output_dir=root_dir,
        hash_val="410d4a301da4e5b2f6f86ec3ddba524e",
    )
Ejemplo n.º 7
0
def download_mmar(item, mmar_dir=None, progress: bool = True):
    """
    Fetch a Medical Model Archive (MMAR) from Nvidia Clara Train and unpack it.

    See Also:
        - https://docs.nvidia.com/clara/
        - Nvidia NGC Registry CLI
        - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html

    Args:
        item: a model item from `MODEL_DESC`, or a key resolving to one.
        mmar_dir: directory to store the MMAR; defaults to the mmars
            subfolder under `torch.hub get_dir()`.
        progress: whether to display a progress bar.

    Examples::
        >>> from monai.apps import download_mmar
        >>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")

    Returns:
        The local directory of the downloaded model.

    Raises:
        ValueError: if no ``mmar_dir`` is given and no default can be derived.
    """
    # Resolve a plain identifier into its MODEL_DESC entry.
    if not isinstance(item, Mapping):
        item = _get_model_spec(item)
    if not mmar_dir:
        # `torch.hub.get_dir` only exists on PyTorch >= 1.6.
        get_dir, has_home = optional_import("torch.hub", name="get_dir")
        if not has_home:
            raise ValueError(
                "mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?"
            )
        mmar_dir = os.path.join(get_dir(), "mmars")

    model_dir = os.path.join(mmar_dir, item[Keys.ID])
    archive_path = os.path.join(mmar_dir,
                                f"{item[Keys.ID]}.{item[Keys.FILE_TYPE]}")
    download_and_extract(
        url=item[Keys.URL],
        filepath=archive_path,
        output_dir=model_dir,
        hash_val=item[Keys.HASH_VAL],
        hash_type=item[Keys.HASH_TYPE],
        file_type=item[Keys.FILE_TYPE],
        has_base=False,
        progress=progress,
    )
    return model_dir
Ejemplo n.º 8
0
    def action_datasets(self, args):
        """
        List the available Decathlon datasets, or download one.

        With ``args.download`` unset, print every known dataset name and its
        URL. Otherwise download and extract ``args.name`` into ``args.output``
        (or the current directory), then remove macOS junk files and the
        downloaded tar archive. Exits with status -1 on unknown names or
        pre-existing target directories.
        """
        from monai.apps.datasets import DecathlonDataset
        from monai.apps.utils import download_and_extract

        resource = DecathlonDataset.resource
        md5 = DecathlonDataset.md5

        if not args.download:
            print("Available Datasets are:")
            print("----------------------------------------------------")
            for k, v in resource.items():
                print(f"  {k:<30}: {v}")
            print("")
        else:
            url = resource.get(args.name) if args.name else None
            if not url:
                # Fix: corrected user-facing message ("NOT Exists" grammar,
                # doubled colon in the availability listing).
                print(f"Dataset ({args.name}) does NOT exist.")

                available = "  " + "\n  ".join(resource.keys())
                print(f"Available Datasets are: \n{available}")
                print("----------------------------------------------------")
                exit(-1)

            dataset_dir = os.path.join(args.output,
                                       args.name) if args.output else args.name
            if os.path.exists(dataset_dir):
                print(f"Directory already exists: {dataset_dir}")
                exit(-1)

            root_dir = os.path.dirname(os.path.realpath(dataset_dir))
            os.makedirs(root_dir, exist_ok=True)
            tarfile_name = f"{dataset_dir}.tar"
            download_and_extract(resource[args.name], tarfile_name, root_dir,
                                 md5.get(args.name))

            # Drop macOS resource-fork files ("._*") and the tar archive.
            junk_files = pathlib.Path(dataset_dir).rglob("._*")
            for j in junk_files:
                os.remove(j)
            os.unlink(tarfile_name)
            print(f"{args.name} is downloaded at: {dataset_dir}")
Ejemplo n.º 9
0
def main():
    """
    Train a GAN on the MedNIST "Hand" X-ray images and save sample output.

    Downloads MedNIST, trains the generator/discriminator pair with
    ``GanTrainer``, checkpoints to ``model_out``, then writes a few
    generated PNGs from random latents.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    set_determinism(12345)
    # NOTE(review): assumes a CUDA device is present; fails on CPU-only hosts.
    device = torch.device("cuda:0")

    # load real data
    mednist_url = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz?dl=1"
    md5_value = "0bc7306e7427e00ad1c5526a6677552d"
    extract_dir = "data"
    tar_save_path = os.path.join(extract_dir, "MedNIST.tar.gz")
    download_and_extract(mednist_url, tar_save_path, extract_dir, md5_value)
    hand_dir = os.path.join(extract_dir, "MedNIST", "Hand")
    # one dict per image so dictionary-based transforms can key on "hand"
    real_data = [{
        "hand": os.path.join(hand_dir, filename)
    } for filename in os.listdir(hand_dir)]

    # define real data transforms
    train_transforms = Compose([
        LoadPNGD(keys=["hand"]),
        AddChannelD(keys=["hand"]),
        ScaleIntensityD(keys=["hand"]),
        RandRotateD(keys=["hand"], range_x=15, prob=0.5, keep_size=True),
        RandFlipD(keys=["hand"], spatial_axis=0, prob=0.5),
        RandZoomD(keys=["hand"], min_zoom=0.9, max_zoom=1.1, prob=0.5),
        ToTensorD(keys=["hand"]),
    ])

    # create dataset and dataloader
    real_dataset = CacheDataset(real_data, train_transforms)
    batch_size = 300
    real_dataloader = DataLoader(real_dataset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=10)

    # define function to process batchdata for input into discriminator
    def prepare_batch(batchdata):
        """
        Process Dataloader batchdata dict object and return image tensors for D Inferer
        """
        return batchdata["hand"]

    # define networks
    disc_net = Discriminator(in_shape=(1, 64, 64),
                             channels=(8, 16, 32, 64, 1),
                             strides=(2, 2, 2, 2, 1),
                             num_res_units=1,
                             kernel_size=5).to(device)

    latent_size = 64
    gen_net = Generator(latent_shape=latent_size,
                        start_shape=(latent_size, 8, 8),
                        channels=[32, 16, 8, 1],
                        strides=[2, 2, 2, 1])

    # initialize both networks
    disc_net.apply(normal_init)
    gen_net.apply(normal_init)

    # input images are scaled to [0,1] so enforce the same of generated outputs
    gen_net.conv.add_module("activation", torch.nn.Sigmoid())
    gen_net = gen_net.to(device)

    # create optimizers and loss functions
    learning_rate = 2e-4
    # Adam betas commonly used for GAN training
    betas = (0.5, 0.999)
    disc_opt = torch.optim.Adam(disc_net.parameters(),
                                learning_rate,
                                betas=betas)
    gen_opt = torch.optim.Adam(gen_net.parameters(),
                               learning_rate,
                               betas=betas)

    disc_loss_criterion = torch.nn.BCELoss()
    gen_loss_criterion = torch.nn.BCELoss()
    # BCE targets: discriminator should output 1 for real, 0 for generated
    real_label = 1
    fake_label = 0

    def discriminator_loss(gen_images, real_images):
        """
        The discriminator loss is calculated by comparing D
        prediction for real and generated images.

        """
        real = real_images.new_full((real_images.shape[0], 1), real_label)
        gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)

        realloss = disc_loss_criterion(disc_net(real_images), real)
        # detach() stops gradients from flowing back into the generator here
        genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)

        return (genloss + realloss) / 2

    def generator_loss(gen_images):
        """
        The generator loss is calculated by determining how realistic
        the discriminator classifies the generated images.

        """
        output = disc_net(gen_images)
        cats = output.new_full(output.shape, real_label)
        return gen_loss_criterion(output, cats)

    # initialize current run dir
    run_dir = "model_out"
    print("Saving model output to: %s " % run_dir)

    # create workflow handlers
    handlers = [
        StatsHandler(
            name="batch_training_loss",
            output_transform=lambda x: {
                Keys.GLOSS: x[Keys.GLOSS],
                Keys.DLOSS: x[Keys.DLOSS]
            },
        ),
        CheckpointSaver(
            save_dir=run_dir,
            save_dict={
                "g_net": gen_net,
                "d_net": disc_net
            },
            save_interval=10,
            save_final=True,
            epoch_level=True,
        ),
    ]

    # define key metric
    key_train_metric = None

    # create adversarial trainer
    # presumably d_train_steps = discriminator updates per generator update —
    # confirm against GanTrainer documentation
    disc_train_steps = 5
    num_epochs = 50

    trainer = GanTrainer(
        device,
        num_epochs,
        real_dataloader,
        gen_net,
        gen_opt,
        generator_loss,
        disc_net,
        disc_opt,
        discriminator_loss,
        d_prepare_batch=prepare_batch,
        d_train_steps=disc_train_steps,
        latent_shape=latent_size,
        key_train_metric=key_train_metric,
        train_handlers=handlers,
    )

    # run GAN training
    trainer.run()

    # Training completed, save a few random generated images.
    print("Saving trained generator sample output.")
    test_img_count = 10
    test_latents = make_latent(test_img_count, latent_size).to(device)
    fakes = gen_net(test_latents)
    for i, image in enumerate(fakes):
        filename = "gen-fake-final-%d.png" % (i)
        save_path = os.path.join(run_dir, filename)
        # write the first channel as an 8-bit PNG scaled to [0, 255]
        img_array = image[0].cpu().data.numpy()
        png_writer.write_png(img_array, save_path, scale=255)
Ejemplo n.º 10
0
    def initialize(self, args):
        """
        `initialize` is called only once when the model is being loaded.
        Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model:
        downloading the model archive, selecting the inference device, and
        building the pre/post transform pipelines.
        """

        # Pull model from google drive
        # NOTE(review): gdrive_url / model_filename / md5_check are expected
        # to be module-level constants defined elsewhere in this file.
        extract_dir = "/models/monai_covid/1"
        tar_save_path = os.path.join(extract_dir, model_filename)
        download_and_extract(gdrive_url,
                             tar_save_path,
                             output_dir=extract_dir,
                             hash_val=md5_check,
                             hash_type="md5")
        # load model configuration
        self.model_config = json.loads(args['model_config'])

        # create inferer engine and load PyTorch model
        inference_device_kind = args.get('model_instance_kind', None)
        logger.info(f"Inference device: {inference_device_kind}")

        # default to CPU; only use CUDA when a GPU instance was requested
        # and CUDA is actually available
        self.inference_device = torch.device('cpu')
        if inference_device_kind is None or inference_device_kind == 'CPU':
            self.inference_device = torch.device('cpu')
        elif inference_device_kind == 'GPU':
            inference_device_id = args.get('model_instance_device_id', '0')
            logger.info(f"Inference device id: {inference_device_id}")

            if torch.cuda.is_available():
                self.inference_device = torch.device(
                    f'cuda:{inference_device_id}')
                cudnn.enabled = True
            else:
                logger.error(
                    f"No CUDA device detected. Using device: {inference_device_kind}"
                )

        # create pre-transforms: load the NIfTI volume, map intensities from
        # [-1000, 500] (presumably Hounsfield units — confirm) to [0, 1] with
        # clipping, crop to the foreground, resize, add a leading dim,
        # tensorize, and move onto the inference device
        self.pre_transforms = Compose([
            LoadImage(reader="NibabelReader",
                      image_only=True,
                      dtype=np.float32),
            AddChannel(),
            ScaleIntensityRange(a_min=-1000,
                                a_max=500,
                                b_min=0.0,
                                b_max=1.0,
                                clip=True),
            CropForeground(margin=5),
            Resize([192, 192, 64], mode="area"),
            AddChannel(),
            ToTensor(),
            Lambda(func=lambda x: x.to(device=self.inference_device)),
        ])

        # create post-transforms: back to CPU, sigmoid, then binarize at 0.5
        self.post_transforms = Compose([
            Lambda(func=lambda x: x.to(device="cpu")),
            Activations(sigmoid=True),
            ToNumpy(),
            AsDiscrete(threshold_values=True, logit_thresh=0.5),
        ])

        self.inferer = SimpleInferer()

        # TorchScript model is expected next to this file
        self.model = torch.jit.load(
            f'{pathlib.Path(os.path.realpath(__file__)).parent}{os.path.sep}covid19_model.ts',
            map_location=self.inference_device)
Ejemplo n.º 11
0
def download_mmar(item,
                  mmar_dir=None,
                  progress: bool = True,
                  api: bool = False,
                  version: int = -1):
    """
    Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.

    See Also:
        - https://docs.nvidia.com/clara/
        - Nvidia NGC Registry CLI
        - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html

    Args:
        item: the corresponding model item from `MODEL_DESC`.
          Or when api is True, the substring to query NGC's model name field.
        mmar_dir: target directory to store the MMAR, default is `mmars` subfolder under `torch.hub get_dir()`.
        progress: whether to display a progress bar.
        api: whether to query NGC and download via api
        version: which version of MMAR to download.  -1 means the latest from ngc.

    Examples::
        >>> from monai.apps import download_mmar
        >>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")
        >>> download_mmar("prostate_mri_segmentation", mmar_dir=".", api=True)


    Returns:
        The local directory of the downloaded model.
        If api is True, a list of local directories of downloaded models.
    """
    if not mmar_dir:
        # default to <torch hub dir>/mmars; `get_dir` needs PyTorch >= 1.6
        get_dir, has_home = optional_import("torch.hub", name="get_dir")
        if has_home:
            mmar_dir = os.path.join(get_dir(), "mmars")
        else:
            raise ValueError(
                "mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?"
            )

    if api:
        # api mode: `item` is a name substring that may match several NGC
        # models; download each match and return all local directories
        model_dict = _get_all_ngc_models(item)
        if len(model_dict) == 0:
            raise ValueError(
                f"api query returns no item for pattern {item}.  Please change or shorten it."
            )
        model_dir_list = []
        for k, v in model_dict.items():
            # version -1 means "latest version reported by NGC"
            ver = v["latest"] if version == -1 else str(version)
            download_url = _get_ngc_url(k, ver)
            model_dir = os.path.join(mmar_dir, v["name"])
            download_and_extract(
                url=download_url,
                filepath=os.path.join(mmar_dir, f'{v["name"]}_{ver}.zip'),
                output_dir=model_dir,
                hash_val=None,
                hash_type="md5",
                file_type="zip",
                has_base=False,
                progress=progress,
            )
            model_dir_list.append(model_dir)
        return model_dir_list

    # direct mode: resolve a plain identifier into its MODEL_DESC entry
    if not isinstance(item, Mapping):
        item = get_model_spec(item)

    # explicit `version` argument overrides the spec's own version
    ver = item.get(Keys.VERSION, 1)
    if version > 0:
        ver = str(version)
    model_fullname = f"{item[Keys.NAME]}_{ver}"
    model_dir = os.path.join(mmar_dir, model_fullname)
    # fall back to an NGC-derived URL when the spec carries no explicit one
    model_url = item.get(Keys.URL) or _get_ngc_url(
        item[Keys.NAME], version=ver, model_prefix="nvidia/med/")
    download_and_extract(
        url=model_url,
        filepath=os.path.join(mmar_dir,
                              f"{model_fullname}.{item[Keys.FILE_TYPE]}"),
        output_dir=model_dir,
        hash_val=item[Keys.HASH_VAL],
        hash_type=item[Keys.HASH_TYPE],
        file_type=item[Keys.FILE_TYPE],
        has_base=False,
        progress=progress,
    )
    return model_dir
Ejemplo n.º 12
0
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Triton CLI for COVID classification inference from NIFTI data')
    parser.add_argument(
        'input',
        type=str,
        help="Path to NIFTI file or directory containing NIFTI files to send for COVID classification"
    )
    args = parser.parse_args()

    nifti_files = []
    extract_dir = "./client/test_data"
    tar_save_path = os.path.join(extract_dir, covid19_filename)
    if os.path.isdir(args.input):
        # Grab files from Google Drive and place in directory
        download_and_extract(gdrive_path, tar_save_path, output_dir=extract_dir, hash_val=md5_check, hash_type="md5")
        nifti_files = open_nifti_files(args.input)
    elif os.path.isfile(args.input):
        nifti_files = [args.input]

    if not nifti_files:
        print("No valid inputs provided")
        sys.exit(1)

    with httpclient.InferenceServerClient("localhost:7555") as client:
        image_bytes = b''
        for nifti_file in nifti_files:
            with open(nifti_file, 'rb') as f:
                image_bytes = f.read()

            input0_data = np.array([[image_bytes]], dtype=np.bytes_)