Example 1
def segment_torch(image, weights_file, overlap, device_id, probability_array,
                  comm_array):
    # np, imagedata_utils, gen_patches, predict_patch_torch and SIZE are
    # module-level names in the original source; comm_array[0] is used to
    # report progress back to the caller.
    import torch

    from .model import Unet3D

    device = torch.device(device_id)
    if weights_file.exists():
        state_dict = torch.load(str(weights_file),
                                map_location=torch.device('cpu'))
    else:
        raise FileNotFoundError("Weights file not found")
    model = Unet3D()
    model.load_state_dict(state_dict["model_state_dict"])
    model.to(device)
    model.eval()

    image = imagedata_utils.image_normalize(image,
                                            0.0,
                                            1.0,
                                            output_dtype=np.float32)
    sums = np.zeros_like(image)
    # segmenting by patches
    for completion, sub_image, patch in gen_patches(image, SIZE, overlap):
        comm_array[0] = completion
        (iz, ez), (iy, ey), (ix, ex) = patch
        sub_mask = predict_patch_torch(sub_image, patch, model, device, SIZE)
        probability_array[iz:ez, iy:ey, ix:ex] += sub_mask
        sums[iz:ez, iy:ey, ix:ex] += 1

    probability_array /= sums
    comm_array[0] = np.inf  # infinity signals that the segmentation has finished
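
Note: every example on this page first calls imagedata_utils.image_normalize() to rescale the input volume. The helper below is only a minimal sketch of that behaviour as it can be inferred from the call sites (linear rescaling into [min_, max_] with an optional output dtype); the name image_normalize_sketch and its defaults are illustrative, and the actual InVesalius implementation may differ.

import numpy as np

def image_normalize_sketch(image, min_=0.0, max_=1.0, output_dtype=np.int16):
    # Linearly rescale the array values into the [min_, max_] range.
    image = np.asarray(image, dtype=np.float64)
    imin, imax = image.min(), image.max()
    scale = (imax - imin) or 1.0  # guard against constant-valued images
    normalized = (image - imin) / scale * (max_ - min_) + min_
    return normalized.astype(output_dtype)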
Example 2
def brain_segment(image, probability_array, comm_array):
    import keras

    # Loading model
    folder = inv_paths.MODELS_DIR.joinpath("brain_mri_t1")
    with open(folder.joinpath("model.json"), "r") as json_file:
        model = keras.models.model_from_json(json_file.read())
    model.load_weights(str(folder.joinpath("model.h5")))
    model.compile("Adam", "binary_crossentropy")

    image = imagedata_utils.image_normalize(image,
                                            0.0,
                                            1.0,
                                            output_dtype=np.float32)
    sums = np.zeros_like(image)
    # segmenting by patches
    for completion, sub_image, patch in gen_patches(image, SIZE, OVERLAP):
        comm_array[0] = completion
        (iz, ez), (iy, ey), (ix, ex) = patch
        sub_mask = predict_patch(sub_image, patch, model, SIZE)
        probability_array[iz:ez, iy:ey, ix:ex] += sub_mask
        sums[iz:ez, iy:ey, ix:ex] += 1

    probability_array /= sums
    comm_array[0] = np.inf
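
Both examples above (and the ones that follow) iterate over gen_patches(image, SIZE, overlap), which, together with SIZE, OVERLAP, predict_patch*, imagedata_utils and np, is defined at module level in the code these snippets come from. Judging only from how the generator is consumed here, a minimal sketch could look as follows (the real generator may pad, order or report progress differently; overlap is assumed to be smaller than patch_size):

import itertools

import numpy as np

def gen_patches_sketch(image, patch_size, overlap):
    # Yield (completion, sub_image, patch): completion is the fraction of patches
    # emitted so far, sub_image is a zero-padded cube of side patch_size, and
    # patch holds the ((iz, ez), (iy, ey), (ix, ex)) bounds inside the volume.
    sz, sy, sx = image.shape
    step = patch_size - overlap
    starts = list(itertools.product(range(0, sz, step),
                                    range(0, sy, step),
                                    range(0, sx, step)))
    for n, (z, y, x) in enumerate(starts, start=1):
        ez = min(z + patch_size, sz)
        ey = min(y + patch_size, sy)
        ex = min(x + patch_size, sx)
        sub_image = np.zeros((patch_size, patch_size, patch_size), dtype=image.dtype)
        sub_image[:ez - z, :ey - y, :ex - x] = image[z:ez, y:ey, x:ex]
        yield n / len(starts), sub_image, ((z, ez), (y, ey), (x, ex))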
Example 3
    def OnOk(self, evt):
        if self.cb_option.GetValue() == "Blobs":
            size_x = self.blobs_panel.spin_size_x.GetValue()
            size_y = self.blobs_panel.spin_size_y.GetValue()
            size_z = self.blobs_panel.spin_size_z.GetValue()
            gaussian = self.blobs_panel.spin_gaussian.GetValue()
            schwarp_f = schwarzp.create_blobs(size_x, size_y, size_z, gaussian)
        elif self.cb_option.GetValue() == "Voronoy":
            size_x = self.voronoy_panel.spin_size_x.GetValue()
            size_y = self.voronoy_panel.spin_size_y.GetValue()
            size_z = self.voronoy_panel.spin_size_z.GetValue()
            number_sites = self.voronoy_panel.spin_nsites.GetValue()
            #  distance = self.voronoy_panel.cb_distance.GetSelection()
            normalize = self.voronoy_panel.cb_normalize.GetValue()
            schwarp_f = schwarzp.create_voronoy(size_x, size_y, size_z,
                                                number_sites, normalize)
        else:
            init_x = self.schwarp_panel.spin_from_x.GetValue()
            end_x = self.schwarp_panel.spin_to_x.GetValue()
            size_x = self.schwarp_panel.spin_size_x.GetValue()

            init_y = self.schwarp_panel.spin_from_y.GetValue()
            end_y = self.schwarp_panel.spin_to_y.GetValue()
            size_y = self.schwarp_panel.spin_size_y.GetValue()

            init_z = self.schwarp_panel.spin_from_z.GetValue()
            end_z = self.schwarp_panel.spin_to_z.GetValue()
            size_z = self.schwarp_panel.spin_size_z.GetValue()
            schwarp_f = schwarzp.create_schwarzp(
                self.cb_option.GetValue(),
                init_x,
                end_x,
                init_y,
                end_y,
                init_z,
                end_z,
                size_x,
                size_y,
                size_z,
            )

        schwarp_i16 = imagedata_utils.image_normalize(schwarp_f,
                                                      min_=-1000,
                                                      max_=1000)
        Publisher.sendMessage(
            "Create project from matrix",
            name="SchwarzP",
            matrix=schwarp_i16,
            new_instance=self.cb_new_inv_instance.GetValue(),
        )
        self.Close()
Example 4
def np2bitmap(arr):
    try:
        height, width, bands = arr.shape
        npimg = arr
    except ValueError:
        height, width = arr.shape
        bands = 3
        arr = imagedata_utils.image_normalize(arr, 0, 255)
        npimg = np.zeros((height, width, bands), dtype=np.uint8)
        npimg[:, :, 0] = arr
        npimg[:, :, 1] = arr
        npimg[:, :, 2] = arr

    image = wx.Image(width, height)
    image.SetData(npimg.tobytes())  # ndarray.tostring() was removed from NumPy; tobytes() is equivalent
    return image.ConvertToBitmap()
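
A hedged usage sketch for np2bitmap(): it accepts either a 2D grayscale array or an H x W x 3 uint8 array and returns a wx.Bitmap, so inside a running wxPython application it might be used roughly like this (the demo frame is hypothetical, and the RGB path is chosen so the snippet does not depend on imagedata_utils):

import numpy as np
import wx

app = wx.App(False)                     # a wx.App must exist before bitmaps are created
frame = wx.Frame(None, title="np2bitmap demo")
rgb = (np.random.rand(128, 128, 3) * 255).astype(np.uint8)  # H x W x 3 array in 0..255
wx.StaticBitmap(frame, bitmap=np2bitmap(rgb))               # np2bitmap as defined above
frame.Show()
app.MainLoop()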
Example 5
def brain_segment_torch(image, device_id, probability_array, comm_array):
    import torch
    from .model import Unet3D
    device = torch.device(device_id)
    folder = inv_paths.MODELS_DIR.joinpath("brain_mri_t1")
    system_state_dict_file = folder.joinpath("brain_mri_t1.pt")
    user_state_dict_file = inv_paths.USER_DL_WEIGHTS.joinpath(
        "brain_mri_t1.pt")
    if not system_state_dict_file.exists() and not user_state_dict_file.exists():
        download_url_to_file(
            "https://github.com/tfmoraes/deepbrain_torch/releases/download/v1.1.0/weights.pt",
            user_state_dict_file,
            "194b0305947c9326eeee9da34ada728435a13c7b24015cbd95971097fc178f22",
            download_callback(comm_array))
    if user_state_dict_file.exists():
        state_dict = torch.load(str(user_state_dict_file))
    elif system_state_dict_file.exists():
        state_dict = torch.load(str(system_state_dict_file))
    else:
        raise FileNotFoundError("Weights file not found")
    model = Unet3D()
    model.load_state_dict(state_dict["model_state_dict"])
    model.to(device)
    model.eval()

    image = imagedata_utils.image_normalize(image,
                                            0.0,
                                            1.0,
                                            output_dtype=np.float32)
    sums = np.zeros_like(image)
    # segmenting by patches
    for completion, sub_image, patch in gen_patches(image, SIZE, OVERLAP):
        comm_array[0] = completion
        (iz, ez), (iy, ey), (ix, ex) = patch
        sub_mask = predict_patch_torch(sub_image, patch, model, device, SIZE)
        probability_array[iz:ez, iy:ey, ix:ex] += sub_mask
        sums[iz:ez, iy:ey, ix:ex] += 1

    probability_array /= sums
    comm_array[0] = np.inf
Example 6
def segment_keras(image, weights_file, overlap, probability_array, comm_array):
    import keras

    # Loading model
    with open(weights_file, "r") as json_file:
        model = keras.models.model_from_json(json_file.read())
    model.load_weights(str(weights_file.parent.joinpath("model.h5")))
    model.compile("Adam", "binary_crossentropy")

    image = imagedata_utils.image_normalize(image,
                                            0.0,
                                            1.0,
                                            output_dtype=np.float32)
    sums = np.zeros_like(image)
    # segmenting by patches
    for completion, sub_image, patch in gen_patches(image, SIZE, overlap):
        comm_array[0] = completion
        (iz, ez), (iy, ey), (ix, ex) = patch
        sub_mask = predict_patch(sub_image, patch, model, SIZE)
        probability_array[iz:ez, iy:ey, ix:ex] += sub_mask
        sums[iz:ez, iy:ey, ix:ex] += 1

    probability_array /= sums
    comm_array[0] = np.inf
Example 7
    def segment(self,
                image,
                prob_threshold,
                backend,
                device_id,
                use_gpu,
                progress_callback=None,
                after_segment=None):
        print("backend", backend)
        if backend.lower() == 'plaidml':
            os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
            os.environ["PLAIDML_DEVICE_IDS"] = device_id
        elif backend.lower() == 'theano':
            os.environ["KERAS_BACKEND"] = "theano"
            if use_gpu:
                os.environ["THEANO_FLAGS"] = "device=cuda0"
                print("Use GPU theano", os.environ["THEANO_FLAGS"])
            else:
                os.environ["THEANO_FLAGS"] = "device=cpu"
        else:
            raise TypeError("Wrong backend")

        import keras
        import invesalius.data.slice_ as slc

        image = imagedata_utils.image_normalize(image, 0.0, 1.0)

        # Loading model
        folder = pathlib.Path(__file__).parent.resolve()
        with open(folder.joinpath("model.json"), "r") as json_file:
            model = keras.models.model_from_json(json_file.read())
        model.load_weights(str(folder.joinpath("model.h5")))
        model.compile("Adam", "binary_crossentropy")

        # segmenting by patches
        msk = np.zeros_like(image, dtype="float32")
        sums = np.zeros_like(image)
        for completion, sub_image, patch in gen_patches(image, SIZE, OVERLAP):
            if self.stop:
                self.stop = False
                return

            if progress_callback is not None:
                progress_callback(completion)
            print("completion", completion)
            (iz, ez), (iy, ey), (ix, ex) = patch
            sub_mask = predict_patch(sub_image, patch, model, SIZE)
            msk[iz:ez, iy:ey, ix:ex] += sub_mask
            sums[iz:ez, iy:ey, ix:ex] += 1

        probability_array = msk / sums

        mask = slc.Slice().create_new_mask()
        mask.was_edited = True
        mask.matrix[:] = 1
        mask.matrix[1:, 1:, 1:] = (probability_array >= prob_threshold) * 255

        self.mask = mask
        self.probability_array = probability_array
        self.segmented = True
        if after_segment is not None:
            after_segment()