Example #1
def postprocess_output(output, patient_nib_shape):
    to_save = interpolate_image(output, patient_nib_shape)
    to_save[to_save >= 0.9] = 1
    to_save[to_save < 0.9] = 0
    to_save = postprocess_prediction(to_save)

    return to_save
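A minimal usage sketch, assuming a 3-D probability map from the model (output) and a nibabel image (patient_nib) are already in scope; both names are illustrative:

# hypothetical inputs: output is the network's probability map,
# patient_nib is the corresponding image loaded with nibabel
binary_mask = postprocess_output(output, patient_nib.shape)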
Example #2
def postprocess_save_output(output,
                            patient_nib,
                            output_path,
                            save_nib_to_disk=False):
    to_save = interpolate_image(output, patient_nib.shape)
    to_save[to_save >= 0.9] = 1
    to_save[to_save < 0.9] = 0
    to_save = postprocess_prediction(to_save)

    to_save_nib = None
    if save_nib_to_disk:
        to_save_nib = nib.Nifti1Image(to_save, patient_nib.affine)
        nib.save(to_save_nib, output_path)
        print(f"to_save image size: {to_save.shape}, dtype: {to_save.dtype}")
        print("Output saved at: ", output_path)

    return to_save, to_save_nib
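A minimal usage sketch with the same assumptions as above; the output path is illustrative:

# hypothetical output path
mask, mask_nib = postprocess_save_output(output, patient_nib,
                                         "/tmp/subject_mask.nii.gz",
                                         save_nib_to_disk=True)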
Example #3
def infer_single_multi_4(input_paths,
                         output_path,
                         weights,
                         mask_path=None,
                         device="cpu"):
    """
    Inference using the multi-modality network on a single subject

    Parameters
    ----------
    input_paths : list
        paths to the input images, in the order T1, T2, T1ce, Flair
    output_path : str
        path of the mask to be generated (prediction)
    weights : str
        path to the weights of the model used
    mask_path : str, optional
        reserved for saving the masked brain (not implemented yet)
    device : int/str
        device to be run on

    Returns
    -------
    None.

    """
    assert all([os.path.exists(image_path) for image_path in input_paths])

    start = time.asctime()
    startstamp = time.time()
    print("\nHostname   :" + str(os.getenv("HOSTNAME")))
    print("\nStart Time :" + str(start))
    print("\nStart Stamp:" + str(startstamp))
    sys.stdout.flush()

    # default config for multi-4 as from config/test_params_multi_4.cfg
    model = fetch_model(
        modelname="resunet",
        num_channels=4,
        num_classes=2,
        num_filters=16,
    )

    checkpoint = torch.load(str(weights), map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint["model_state_dict"])

    if device != "cpu":
        model.cuda()
    model.eval()

    stack = np.zeros([4, 128, 128, 128], dtype=np.float32)
    for i, image_path in enumerate(input_paths):
        patient_nib = nib.load(image_path)
        image = preprocess_image(patient_nib)
        stack[i] = image
    stack = stack[np.newaxis, ...]
    image = torch.FloatTensor(stack)

    if device != "cpu":
        image = image.cuda()

    with torch.no_grad():
        output = model(image)
        output = output.cpu().numpy()[0][0]
        to_save = interpolate_image(output, (240, 240, 160))
        to_save = unpad_image(to_save)
        to_save[to_save >= 0.9] = 1
        to_save[to_save < 0.9] = 0
        for i in range(to_save.shape[2]):
            if np.any(to_save[:, :, i]):
                to_save[:, :, i] = binary_fill_holes(to_save[:, :, i])
        to_save = postprocess_prediction(to_save).astype(np.uint8)
        to_save_nib = nib.Nifti1Image(to_save, patient_nib.affine)
        nib.save(to_save_nib, output_path)

    print("Done with running the model.")

    if mask_path is not None:
        raise NotImplementedError('Sorry, masking is not implemented (yet).')

    print("Final output stored in : %s" % (output_path))
    print("Thank you for using BrainMaGe")
    print("*" * 60)
Example #4
def infer_single_ma(input_path,
                    output_path,
                    weights,
                    mask_path=None,
                    device="cpu"):
    start = time.asctime()
    startstamp = time.time()
    print("\nHostname   :" + str(os.getenv("HOSTNAME")))
    print("\nStart Time :" + str(start))
    print("\nStart Stamp:" + str(startstamp))
    sys.stdout.flush()
    print("Generating Test csv")

    model = fetch_model(modelname="resunet",
                        num_channels=1,
                        num_classes=2,
                        num_filters=16)

    checkpoint = torch.load(weights, map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint["model_state_dict"])

    if device != "cpu":
        model.cuda()
    model.eval()

    patient_nib = nib.load(input_path)
    image_data = patient_nib.get_fdata()
    image = process_image(image_data)
    image = resize(image, (128, 128, 128),
                   order=3,
                   mode="edge",
                   cval=0,
                   anti_aliasing=False)
    image = image[np.newaxis, np.newaxis, ...]
    image = torch.FloatTensor(image)
    if device != "cpu":
        image = image.cuda()
    with torch.no_grad():
        output = model(image)
        output = output.cpu().numpy()[0][0]
        to_save = interpolate_image(output, patient_nib.shape)
        to_save[to_save >= 0.9] = 1
        to_save[to_save < 0.9] = 0
        to_save = postprocess_prediction(to_save)
        to_save_nib = nib.Nifti1Image(to_save, patient_nib.affine)
        nib.save(to_save_nib, output_path)

    print("Done with running the model.")

    if mask_path is not None:
        print(
            "You chose to save the brain. We are now saving it with the masks."
        )
        patient_nib_write = nib.load(input_path)
        image_data_write = patient_nib_write.get_fdata()
        image_data_write[to_save == 0] = 0
        to_save_brain = nib.Nifti1Image(image_data_write,
                                        patient_nib_write.affine)
        nib.save(to_save_brain, mask_path)

    print("Thank you for using BrainMaGe")
    print("*" * 60)
Example #5
def infer_multi_4(cfg, device, save_brain, weights):
    """
    Inference using the multi-modality network on a test CSV of subjects

    Parameters
    ----------
    cfg : string
        Location of the config file
    device : int/str
        device to be run on
    save_brain : int
        whether to save the masked brain images in addition to the masks
    weights : str
        path to the weights of the model used

    Returns
    -------
    None.

    """
    cfg = os.path.abspath(cfg)

    if os.path.isfile(cfg):
        params_df = pd.read_csv(
            cfg,
            sep=" = ",
            names=["param_name", "param_value"],
            comment="#",
            skip_blank_lines=True,
            engine="python",
        ).fillna(" ")
    else:
        print("Missing test_params.cfg file? Please give one!")
        sys.exit(0)
    params = {}
    params["weights"] = weights
    for i in range(params_df.shape[0]):
        params[params_df.iloc[i, 0]] = params_df.iloc[i, 1]
    start = time.asctime()
    startstamp = time.time()
    print("\nHostname   :" + str(os.getenv("HOSTNAME")))
    print("\nStart Time :" + str(start))
    print("\nStart Stamp:" + str(startstamp))
    sys.stdout.flush()

    print("Generating Test csv")
    os.makedirs(params["results_dir"], exist_ok=True)
    if not params["csv_provided"] == "True":
        print("Since CSV were not provided, we are gonna create for you")
        csv_creator_adv.generate_csv(
            params["test_dir"],
            to_save=params["results_dir"],
            mode=params["mode"],
            ftype="test",
            modalities=params["modalities"],
        )
        test_csv = os.path.join(params["results_dir"], "test.csv")
    else:
        test_csv = params["test_csv"]

    test_df = pd.read_csv(test_csv)

    model = fetch_model(
        params["model"],
        int(params["num_modalities"]),
        int(params["num_classes"]),
        int(params["base_filters"]),
    )
    if device != "cpu":
        model.cuda()

    checkpoint = torch.load(str(params["weights"]))
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()

    for patient in tqdm.tqdm(test_df.values):
        os.makedirs(os.path.join(params["results_dir"], patient[0]),
                    exist_ok=True)
        nmods = params["num_modalities"]
        stack = np.zeros([int(nmods), 128, 128, 128], dtype=np.float32)
        for i in range(int(nmods)):
            image_path = patient[i + 1]
            patient_nib = nib.load(image_path)
            image = preprocess_image(patient_nib)
            stack[i] = image
        stack = stack[np.newaxis, ...]
        stack = torch.FloatTensor(stack)
        # move the batch to GPU only when a GPU device was requested
        image = stack.cuda() if device != "cpu" else stack
        with torch.no_grad():
            output = model(image)
            output = output.cpu().numpy()[0][0]
            to_save = interpolate_image(output, (240, 240, 160))
            to_save = unpad_image(to_save)
            to_save[to_save >= 0.9] = 1
            to_save[to_save < 0.9] = 0
            for i in range(to_save.shape[2]):
                if np.any(to_save[:, :, i]):
                    to_save[:, :, i] = binary_fill_holes(to_save[:, :, i])
            to_save = postprocess_prediction(to_save).astype(np.uint8)
            to_save_mask = nib.Nifti1Image(to_save, patient_nib.affine)
            nib.save(
                to_save_mask,
                os.path.join(params["results_dir"], patient[0],
                             patient[0] + "_mask.nii.gz"),
            )
    print("Done with running the model.")
    if save_brain:
        print(
            "You chose to save the brain. We are now saving it with the masks."
        )
        for patient in tqdm.tqdm(test_df.values):
            nmods = params["num_modalities"]
            mask_nib = nib.load(
                os.path.join(params["results_dir"], patient[0],
                             patient[0] + "_mask.nii.gz"))
            mask_data = mask_nib.get_fdata().astype(np.int8)
            for i in range(int(nmods)):
                # str.strip removes a character set, not a suffix; trim the extension explicitly
                image_name = os.path.basename(patient[i + 1]).replace(".nii.gz", "")
                image_path = patient[i + 1]
                patient_nib = nib.load(image_path)
                image_data = patient_nib.get_fdata()
                image_data[mask_data == 0] = 0
                to_save_image = nib.Nifti1Image(image_data, patient_nib.affine)
                nib.save(
                    to_save_image,
                    os.path.join(params["results_dir"], patient[0],
                                 image_name + "_brain.nii.gz"),
                )

    print("Final output stored in : %s" % (params["results_dir"]))
    print("Thank you for using BrainMaGe")
    print("*" * 60)
Example #6
def infer_ma(cfg, device, save_brain, weights):
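    """
    Batch inference using the modality-agnostic (MA) network

    Parameters
    ----------
    cfg : string
        Location of the config file
    device : int/str
        device to be run on
    save_brain : int
        whether to save the masked brain images in addition to the masks
    weights : str
        path to the weights of the model used

    Returns
    -------
    None.

    """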

    cfg = os.path.abspath(cfg)

    if os.path.isfile(cfg):
        params_df = pd.read_csv(cfg,
                                sep=' = ',
                                names=['param_name', 'param_value'],
                                comment='#',
                                skip_blank_lines=True,
                                engine='python').fillna(' ')
    else:
        print('Missing test_params.cfg file. Please provide one!')
        sys.exit(1)
    params = {}
    for i in range(params_df.shape[0]):
        params[params_df.iloc[i, 0]] = params_df.iloc[i, 1]
    params['weights'] = weights
    start = time.asctime()
    startstamp = time.time()
    print("\nHostname   :" + str(os.getenv("HOSTNAME")))
    print("\nStart Time :" + str(start))
    print("\nStart Stamp:" + str(startstamp))
    sys.stdout.flush()

    print("Generating Test csv")
    os.makedirs(params['results_dir'], exist_ok=True)
    if params['csv_provided'] != 'True':
        print('Since a CSV was not provided, we will create one for you.')
        csv_creator_adv.generate_csv(params['test_dir'],
                                     to_save=params['results_dir'],
                                     mode=params['mode'],
                                     ftype='test',
                                     modalities=params['modalities'])
        test_csv = os.path.join(params['results_dir'], 'test.csv')
    else:
        test_csv = params['test_csv']

    test_df = pd.read_csv(test_csv)
    test_df.ID = test_df.ID.astype(str)
    temp_dir = os.path.join(params['results_dir'], 'Temp')
    os.makedirs(temp_dir, exist_ok=True)

    patients_dict = {}

    print("Resampling the images to isotropic resolution of 1mm x 1mm x 1mm")
    print("Also Converting the images to RAI and brats for smarter use.")
    for patient in tqdm.tqdm(test_df.values):
        os.makedirs(os.path.join(temp_dir, patient[0]), exist_ok=True)
        patient_path = patient[1]
        image = nib.load(patient_path)
        old_spacing = image.header.get_zooms()
        old_affine = image.affine
        old_shape = image.header.get_data_shape()
        new_spacing = (1, 1, 1)
        new_shape = (
            int(np.round(old_spacing[0] / new_spacing[0] * float(image.shape[0]))),
            int(np.round(old_spacing[1] / new_spacing[1] * float(image.shape[1]))),
            int(np.round(old_spacing[2] / new_spacing[2] * float(image.shape[2]))),
        )
        image_data = image.get_fdata()
        new_image = resize(image_data,
                           new_shape,
                           order=3,
                           mode='edge',
                           cval=0,
                           anti_aliasing=False)
        # keep the original affine, but normalize the direction cosines to +/-1
        # to match the new isotropic 1 mm spacing
        new_affine = np.array(old_affine)
        for i in range(3):
            for j in range(3):
                if old_affine[i, j] != 0:
                    new_affine[i, j] = 1 if old_affine[i, j] > 0 else -1
        temp_image = nib.Nifti1Image(new_image, new_affine)
        nib.save(
            temp_image,
            os.path.join(temp_dir, patient[0],
                         patient[0] + '_resamp111.nii.gz'))

        temp_dict = {}
        temp_dict['name'] = patient[0]
        temp_dict['old_spacing'] = old_spacing
        temp_dict['old_affine'] = old_affine
        temp_dict['old_shape'] = old_shape
        temp_dict['new_spacing'] = new_spacing
        temp_dict['new_affine'] = new_affine
        temp_dict['new_shape'] = new_shape

        patient_path = os.path.join(temp_dir, patient[0],
                                    patient[0] + '_resamp111.nii.gz')
        patient_nib = nib.load(patient_path)
        patient_data = patient_nib.get_fdata()
        patient_data, pad_info = pad_image(patient_data)
        patient_affine = patient_nib.affine
        temp_image = nib.Nifti1Image(patient_data, patient_affine)
        nib.save(
            temp_image,
            os.path.join(temp_dir, patient[0],
                         patient[0] + '_bratsized.nii.gz'))
        temp_dict['pad_info'] = pad_info
        patients_dict[patient[0]] = temp_dict

    model = fetch_model(params['model'], int(params['num_modalities']),
                        int(params['num_classes']),
                        int(params['base_filters']))
    checkpoint = torch.load(str(params['weights']))
    model.load_state_dict(checkpoint['model_state_dict'])

    if device != 'cpu':
        model.cuda()
    model.eval()

    print("Done Resampling the Data.\n")
    print("--" * 30)
    print("Running the model on the subjects")
    for patient in tqdm.tqdm(test_df.values):
        patient_path = os.path.join(temp_dir, patient[0],
                                    patient[0] + '_bratsized.nii.gz')
        patient_nib = nib.load(patient_path)
        image = patient_nib.get_fdata()
        image = process_image(image)
        image = resize(image, (128, 128, 128),
                       order=3,
                       mode='edge',
                       cval=0,
                       anti_aliasing=False)
        image = image[np.newaxis, np.newaxis, ...]
        image = torch.FloatTensor(image)
        if device != 'cpu':
            image = image.cuda()
        with torch.no_grad():
            output = model(image)
            output = output.cpu().numpy()[0][0]
            to_save = interpolate_image(output, patient_nib.shape)
            to_save[to_save >= 0.9] = 1
            to_save[to_save < 0.9] = 0
            to_save_nib = nib.Nifti1Image(to_save, patient_nib.affine)
            nib.save(
                to_save_nib,
                os.path.join(temp_dir, patient[0],
                             patient[0] + '_bratsized_mask.nii.gz'))
            current_patient_dict = patients_dict[patient[0]]
            new_image = padder_and_cropper(to_save,
                                           current_patient_dict['pad_info'])
            to_save_new_nib = nib.Nifti1Image(new_image, patient_nib.affine)
            nib.save(
                to_save_new_nib,
                os.path.join(temp_dir, patient[0],
                             patient[0] + '_resample111_mask.nii.gz'))
            to_save_final = resize(new_image,
                                   current_patient_dict['old_shape'],
                                   order=3,
                                   mode='edge',
                                   cval=0)
            to_save_final[to_save_final >= 0.9] = 1
            to_save_final[to_save_final < 0.9] = 0
            for i in range(to_save_final.shape[2]):
                if np.any(to_save_final[:, :, i]):
                    to_save_final[:, :, i] = binary_fill_holes(to_save_final[:, :, i])
            to_save_final = postprocess_prediction(to_save_final).astype(np.uint8)
            to_save_final_nib = nib.Nifti1Image(
                to_save_final, current_patient_dict['old_affine'])

            os.makedirs(os.path.join(params['results_dir'], patient[0]),
                        exist_ok=True)

            nib.save(
                to_save_final_nib,
                os.path.join(params['results_dir'], patient[0],
                             patient[0] + '_mask.nii.gz'))

    print("Done with running the model.")
    if save_brain:
        print(
            "You chose to save the brain. We are now saving it with the masks."
        )
        for patient in tqdm.tqdm(test_df.values):
            image = nib.load(patient[1])
            image_data = image.get_fdata()
            mask = nib.load(
                os.path.join(params['results_dir'], patient[0],
                             patient[0] + '_mask.nii.gz'))
            mask_data = mask.get_fdata().astype(np.int8)
            image_data[mask_data == 0] = 0
            to_save_brain = nib.Nifti1Image(image_data, image.affine)
            nib.save(
                to_save_brain,
                os.path.join(params['results_dir'], patient[0],
                             patient[0] + '_brain.nii.gz'))

    print("Please check the %s folder for the intermediate outputs if you\"+\
          would like to see some intermediate steps." %
          (os.path.join(params['results_dir'], 'Temp')))
    print("Final output stored in : %s" % (params['results_dir']))
    print("Thank you for using BrainMaGe")
    print('*' * 60)
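A minimal usage sketch with an illustrative config and weights path; the config must define the same keys read above (results_dir, csv_provided, test_dir or test_csv, mode, modalities, model, num_modalities, num_classes, base_filters):

# hypothetical paths
infer_ma("config/test_params.cfg",
         device="cpu",
         save_brain=True,
         weights="resunet_ma_weights.ckpt")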