Example No. 1
    def on_fill_axons_button(self, event):
        """
        This function is called when the fillAxon button is pressed by the user. It uses a flood fill algorithm to fill
        the inside of the myelin objects with the axon mask
        """
        # Find the visible myelin and axon mask
        myelin_mask_overlay = self.get_visible_myelin_overlay()
        axon_mask_overlay = self.get_visible_axon_overlay()

        if myelin_mask_overlay is None:
            return
        if axon_mask_overlay is None:
            return

        # Extract the data from the overlays
        myelin_array = myelin_mask_overlay[:, :, 0]
        axon_array = axon_mask_overlay[:, :, 0]

        # Perform the floodfill operation
        axon_extracted_array = postprocessing.floodfill_axons(
            axon_array, myelin_array)

        # Flip and rotate the array back to the original PNG orientation
        # (the image loaded in FSLeyes is flipped) and scale it to the binary intensity
        axon_corr_array = np.flipud(axon_extracted_array)
        axon_corr_array = params.intensity['binary'] * np.rot90(
            axon_corr_array, k=1, axes=(1, 0))
        file_name = self.ads_temp_dir / (
            myelin_mask_overlay.name[:-len("-myelin")] + "-axon-corr.png")
        ads_utils.imwrite(filename=file_name, img=axon_corr_array)
        self.load_png_image_from_path(file_name, is_mask=True, colormap="blue")
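Below is a minimal, self-contained sketch (a toy NumPy array, not the plugin's actual overlay data) of the flip/rotate orientation correction applied above before the corrected axon mask is written out:

import numpy as np

# Toy stand-in for the overlay array returned by FSLeyes
overlay_data = np.arange(6).reshape(2, 3)

# Flip vertically, then rotate 90 degrees, as in on_fill_axons_button above
corrected = np.rot90(np.flipud(overlay_data), k=1, axes=(1, 0))
print(corrected.shape)  # (3, 2): the axes are swapped back to the PNG orientation
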
Example No. 2
def generate_axons_from_myelin(path_prediction, path_myelin_corrected):
    """
    :param path_prediction: path of the prediction i.e. image of axon+myelin segmentation (output of AxonDeepSeg)
    :param path_myelin_corrected: path of corrected myelin by the user i.e. myelin mask (uint8 type with myelin=255, background=0)
    :return: merged and corrected axon+myelin image
    """

    # If string, convert to Path objects
    path_prediction = convert_path(path_prediction)
    path_myelin_corrected = convert_path(path_myelin_corrected)

    # read output from axondeepseg and myelin mask corrected by user
    prediction = ads.imread(path_prediction)
    myelin_corrected = ads.imread(path_myelin_corrected)

    # compute the axon mask from axondeepseg (axon=255, myelin=127, background=0)
    axon_ads = prediction > 200

    # get the myelin mask corrected by user (myelin=255, background=0)
    myelin_corrected = myelin_corrected > 200

    # compute logical OR between axondeepseg axon mask and myelin corrected mask
    fused = np.logical_or(axon_ads, myelin_corrected)

    # compute new axon mask by logical XOR between corrected myelin mask and fused
    new_axon_mask = np.logical_xor(myelin_corrected, fused)

    # merge corrected myelin mask and generated axon mask
    both = new_axon_mask * 255 + myelin_corrected * 127

    # save the corrected axon+myelin image
    path = path_prediction.parent / 'axon_myelin_mask_corrected.png'
    ads.imwrite(path, both)

    return both
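A minimal standalone sketch (toy boolean arrays, values assumed for illustration) of the OR/XOR combination used above to recover the axon pixels that are not covered by the corrected myelin:

import numpy as np

axon_ads = np.array([True, True, False, False])          # axon pixels from the prediction
myelin_corrected = np.array([False, True, True, False])  # myelin pixels corrected by the user

fused = np.logical_or(axon_ads, myelin_corrected)        # union of both masks
new_axon_mask = np.logical_xor(myelin_corrected, fused)  # union minus myelin = axon only
print(new_axon_mask)  # [ True False False False]
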
Example No. 3
    def on_load_mask_button(self, event):
        """
        This function is called when the user presses on the loadMask button. It allows the user to select an existing
        PNG mask, convert it into a NIfTI and load it into FSLeyes.
        The mask needs to contain an axon + myelin mask. The Axons should have an intensity > 200. The myelin should
        have an intensity between 100 and 200. The data should be in uint8.
        """
        # Ask the user to select the mask image
        with wx.FileDialog(self,
                           "select mask .png file",
                           style=wx.FD_OPEN
                           | wx.FD_FILE_MUST_EXIST) as file_dialog:

            if file_dialog.ShowModal() == wx.ID_CANCEL:
                # The user cancelled the operation
                return

            in_file = file_dialog.GetPath()

        # Check if the image format is valid
        image_extension = os.path.splitext(in_file)[1]
        valid_extensions = [".png", ".tif", ".jpg", ".jpeg"]
        if image_extension not in valid_extensions:
            self.show_message("Invalid file extension")
            return

        # Get the image data
        img_png2D = ads_utils.imread(in_file)

        image_name = os.path.basename(in_file)
        image_name = image_name.split(image_extension)[0]

        # Extract the Axon mask
        axon_mask = img_png2D > 200
        axon_mask = params.intensity['binary'] * np.array(axon_mask,
                                                          dtype=np.uint8)

        # Extract the Myelin mask
        myelin_mask = (img_png2D > 100) & (img_png2D < 200)
        myelin_mask = params.intensity['binary'] * np.array(myelin_mask,
                                                            dtype=np.uint8)

        # Load the masks into FSLeyes
        axon_outfile = self.ads_temp_dir.name + "/" + image_name + "-axon.png"
        ads_utils.imwrite(axon_outfile, axon_mask)
        self.load_png_image_from_path(axon_outfile,
                                      is_mask=True,
                                      colormap="blue")

        myelin_outfile = self.ads_temp_dir.name + "/" + image_name + "-myelin.png"
        ads_utils.imwrite(myelin_outfile, myelin_mask)
        self.load_png_image_from_path(myelin_outfile,
                                      is_mask=True,
                                      colormap="red")
Example No. 4
def merge_masks(path_axon, path_myelin):
    """
    Merge a binary axon mask and a binary myelin mask (both 0/255 uint8 images) into a single
    axon+myelin image (axon=255, myelin=127), saved in the folder containing the axon mask.
    :param path_axon: path of the axon mask.
    :param path_myelin: path of the myelin mask.
    :return: merged axon+myelin image.
    """
    # If string, convert to Path objects
    path_axon = convert_path(path_axon)
    path_myelin = convert_path(path_myelin)

    axon = ads.imread(path_axon)
    myelin = ads.imread(path_myelin)

    both = (axon / 255) * 255 + (myelin / 255) * 127

    # get main path
    path_folder = path_axon.parent

    # save the masks
    ads.imwrite(path_folder / 'axon_myelin_mask.png', both)

    return both
Example No. 5
def axon_segmentation(path_acquisitions_folders,
                      acquisitions_filenames,
                      path_model_folder,
                      config_dict,
                      ckpt_name='model',
                      segmentations_filenames=[str(axonmyelin_suffix)],
                      inference_batch_size=1,
                      overlap_value=25,
                      resampled_resolutions=0.1,
                      acquired_resolution=None,
                      prediction_proba_activate=False,
                      write_mode=True,
                      gpu_per=1.0,
                      verbosity_level=0):
    """
    Wrapper performing the segmentation of all the requested acquisitions and generating (if requested) the
    segmentation images.
    :param path_acquisitions_folders: List of folders where the acquisitions to segment are located.
    :param acquisitions_filenames: List of names of acquisitions to segment.
    :param path_model_folder: Path to the folder where the model is located.
    :param config_dict: Dictionary containing the configuration of the training parameters of the model.
    :param ckpt_name: String, name of the checkpoint to use.
    :param segmentations_filenames: List of the names of the segmentations files, to be used when creating the files.
    :param inference_batch_size: Size of the batches fed to the network.
    :param overlap_value: Int, number of pixels to use for overlapping the predictions.
    :param resampled_resolutions: List of the resolutions (in µm) to resample to.
    :param acquired_resolution: List of the resolutions (in µm) for native images.
    :param prediction_proba_activate: Boolean, whether to compute probability maps or not.
    :param write_mode: Boolean, whether to create segmentation images or not.
    :param gpu_per: Percentage of the GPU to use, if we use it.
    :param verbosity_level: Int, level of verbosity. The higher, the more information is displayed.
    :return: List of predictions, and optionally of probability maps.
    """

    # If string, convert to Path objects
    path_acquisitions_folders = convert_path(path_acquisitions_folders)
    path_model_folder = convert_path(path_model_folder)
    # Processing input so they are lists in every situation
    path_acquisitions_folders, acquisitions_filenames, resampled_resolutions, segmentations_filenames = \
        list(map(ensure_list_type, [path_acquisitions_folders, acquisitions_filenames, resampled_resolutions,
                                    segmentations_filenames]))

    if len(segmentations_filenames) != len(path_acquisitions_folders):
        segmentations_filenames = [str(axonmyelin_suffix)
                                   ] * len(path_acquisitions_folders)

    if len(acquisitions_filenames) != len(path_acquisitions_folders):
        acquisitions_filenames = ['image.png'] * len(path_acquisitions_folders)

    if len(resampled_resolutions) != len(path_acquisitions_folders):
        resampled_resolutions = [resampled_resolutions[0]
                                 ] * len(path_acquisitions_folders)

    # Generating the paths to the acquisitions and loading the acquisition resolutions.
    path_acquisitions = [
        path_acquisitions_folders[i] / e
        for i, e in enumerate(acquisitions_filenames)
    ]

    # If no resolution was provided, read the pixel size (in micrometers) from each image folder.
    if acquired_resolution is None:
        if (path_acquisitions_folders[0] /
                'pixel_size_in_micrometer.txt').exists():
            # Read the pixel size from each folder's pixel_size_in_micrometer.txt file
            acquisitions_resolutions = [
                float((path_acquisition_folder /
                       'pixel_size_in_micrometer.txt').read_text())
                for path_acquisition_folder in path_acquisitions_folders
            ]
        else:
            exception_msg = "ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. " \
                            "Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file " \
                            "containing the pixel size value."
            raise Exception(exception_msg)

    # If resolution is specified as input argument, use it
    else:
        acquisitions_resolutions = [acquired_resolution
                                    ] * len(path_acquisitions_folders)

    # Ensuring that the config file is valid
    config_dict = update_config(default_configuration(), config_dict)

    # Perform the segmentation of all the requested images.
    if prediction_proba_activate:
        prediction, prediction_proba = apply_convnet(
            path_acquisitions,
            acquisitions_resolutions,
            path_model_folder,
            config_dict,
            ckpt_name=ckpt_name,
            inference_batch_size=inference_batch_size,
            overlap_value=overlap_value,
            resampled_resolutions=resampled_resolutions,
            prediction_proba_activate=prediction_proba_activate,
            gpu_per=gpu_per,
            verbosity_level=verbosity_level)
        # Predictions are shape of image, value = class of pixel
    else:
        prediction = apply_convnet(
            path_acquisitions,
            acquisitions_resolutions,
            path_model_folder,
            config_dict,
            ckpt_name=ckpt_name,
            inference_batch_size=inference_batch_size,
            overlap_value=overlap_value,
            resampled_resolutions=resampled_resolutions,
            prediction_proba_activate=prediction_proba_activate,
            gpu_per=gpu_per,
            verbosity_level=verbosity_level)
        # Predictions are shape of image, value = class of pixel

    # Final part of the function: generating the images if needed and returning the values
    if write_mode:
        for i, pred in enumerate(prediction):
            # Transform the prediction to an image
            n_classes = config_dict['n_classes']
            paint_vals = [
                int(255 * float(j) / (n_classes - 1)) for j in range(n_classes)
            ]

            # Create the mask with values in range 0-255
            mask = np.zeros_like(pred)
            for j in range(n_classes):
                mask[pred == j] = paint_vals[j]

            # Then we save the image
            image_name = convert_path(acquisitions_filenames[i]).stem
            ads.imwrite(
                path_acquisitions_folders[i] /
                (image_name + segmentations_filenames[i]), mask, 'png')

            axon_prediction, myelin_prediction = get_masks(
                path_acquisitions_folders[i] /
                (image_name + segmentations_filenames[i]))

    if prediction_proba_activate:
        return prediction, prediction_proba
    else:
        return prediction
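A short self-contained sketch of the class-to-intensity mapping used in the write_mode branch above, with n_classes assumed to be 3 (the usual background/myelin/axon case):

n_classes = 3
paint_vals = [int(255 * float(j) / (n_classes - 1)) for j in range(n_classes)]
print(paint_vals)  # [0, 127, 255]: background=0, myelin=127, axon=255
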
Example No. 6
def patched_to_dataset(path_patched_data,
                       path_dataset,
                       type_,
                       random_seed=None):
    """
    Creates a dataset using already created patches.
    :param path_patched_data: Path to where to find the folders where the patches folders are located.
    :param path_dataset: Path to where to create the newly formed dataset.
    :param type_: String, either 'unique' or 'mixed'. 'Unique' means that we create a dataset with only TEM or only SEM
    data. 'Mixed' means that we are creating a dataset with both types of images.
    :param random_seed: Int, the random seed to use to be able to consistently recreate generated datasets.
    :return: None.
    """

    # If string, convert to Path objects
    path_patched_data = convert_path(path_patched_data)
    path_dataset = convert_path(path_dataset)

    # Using the provided random seed so that, given a fixed input, the generated datasets are always the same.
    np.random.seed(random_seed)

    # First we create the directory where the dataset will be stored, if it does not already exist
    if not path_dataset.exists():
        path_dataset.mkdir(parents=True)

    # First case: there is only one type of acquisition to use.
    if type_ == 'unique':

        i = 0  # Total patches index

        # We loop through all folders containing patches
        patches_folder_names = [f for f in path_patched_data.iterdir()]
        for patches_folder in tqdm(patches_folder_names):

            path_patches_folder = path_patched_data / patches_folder.name
            if path_patches_folder.is_dir():

                # We are now in the patches folder
                L_img, L_mask = [], []
                filenames = [f for f in path_patches_folder.iterdir()]
                for data in filenames:
                    root, index = data.stem.split('_')

                    if 'image' in data.name:
                        img = ads.imread(path_patches_folder / data.name)
                        L_img.append((img, int(index)))

                    elif 'mask' in data.name:
                        mask = ads.imread(path_patches_folder / data.name)
                        L_mask.append((mask, int(index)))

                # Now we sort the patches to be sure we get them in the right order
                L_img_sorted, L_mask_sorted = sort_list_files(L_img, L_mask)

                # Saving the images in the new folder
                for img, k in L_img_sorted:
                    ads.imwrite(path_dataset.joinpath('image_%s.png' % i), img)
                    ads.imwrite(path_dataset.joinpath('mask_%s.png' % i),
                                L_mask_sorted[k][0])
                    i = i + 1  # Using the global i here.

    # Else we are using different types of acquisitions. It's important to have them separated in a SEM folder
    # and in a TEM folder.
    elif type_ == 'mixed':
        # We determine which acquisition type we are going to oversample (i.e. take the same images multiple times)
        SEM_patches_folder = path_patched_data / 'SEM'
        TEM_patches_folder = path_patched_data / 'TEM'

        minority_patches_folder, len_minority, majority_patches_folder, len_majority = find_minority_type(
            SEM_patches_folder, TEM_patches_folder)

        # First we move all patches from the majority acquisition type to the new dataset
        foldernames = [
            folder.name for folder in majority_patches_folder.iterdir()
        ]
        i = 0
        for patches_folder in tqdm(foldernames):

            path_patches_folder = majority_patches_folder / patches_folder
            if path_patches_folder.is_dir():
                # We are now in the patches folder
                L_img, L_mask = [], []
                filenames = [f for f in path_patches_folder.iterdir()]
                for data in path_patches_folder.iterdir():
                    root, index = data.stem.split('_')

                    if 'image' in data.name:
                        img = ads.imread(path_patches_folder / data.name)
                        L_img.append((img, int(index)))

                    elif 'mask' in data.name:
                        mask = ads.imread(path_patches_folder / data.name)
                        L_mask.append((mask, int(index)))
                # Now we sort the patches to be sure we get them in the right order
                L_img_sorted, L_mask_sorted = sort_list_files(L_img, L_mask)

                # Saving the images in the new folder
                for img, k in L_img_sorted:
                    ads.imwrite(path_dataset.joinpath('image_%s.png' % i), img)
                    ads.imwrite(path_dataset.joinpath('mask_%s.png' % i),
                                L_mask_sorted[k][0])
                    i = i + 1
        # Then we oversample the minority acquisition type into the new dataset

        # We determine the ratio to take
        ratio_oversampling = float(len_majority) / len_minority

        # We go through each image folder in the minority patches
        foldernames = [
            folder.name for folder in minority_patches_folder.iterdir()
        ]
        for patches_folder in tqdm(foldernames):

            path_patches_folder = minority_patches_folder / patches_folder
            if path_patches_folder.is_dir():

                # We are now in the patches folder
                # We load every image, resetting the lists for this folder
                L_img, L_mask = [], []
                filenames = [f for f in path_patches_folder.iterdir()]
                n_img = np.floor(len(filenames) / 2)

                for data in filenames:
                    root, index = data.stem.split('_')
                    if 'image' in data.name:
                        img = ads.imread(path_patches_folder / data.name)
                        L_img.append((img, int(index)))

                    elif 'mask' in data.name:
                        mask = ads.imread(path_patches_folder / data.name)
                        L_mask.append((mask, int(index)))

                # Now we sort the patches to be sure we get them in the right order
                L_img_sorted, L_mask_sorted = sort_list_files(L_img, L_mask)
                L_merged_sorted = np.asarray([
                    L_img_sorted[j] + L_mask_sorted[j]
                    for j in range(len(L_img_sorted))
                ])

                # We create a new array composed of enough elements so that the two types of acquisitions are balanced
                # (oversampling)
                L_elements_to_save = L_merged_sorted[
                    np.random.choice(int(L_merged_sorted.shape[0]),
                                     int(np.ceil(ratio_oversampling * n_img)),
                                     replace=True), :]

                # Finally we save all the images in order at the new dataset path.
                for j in range(L_elements_to_save.shape[0]):
                    img = L_elements_to_save[j][0]
                    mask = L_elements_to_save[j][2]
                    ads.imwrite(path_dataset.joinpath('image_%s.png' % i), img)
                    ads.imwrite(path_dataset.joinpath('mask_%s.png' % i), mask)
                    i = i + 1
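A toy, self-contained sketch of the oversampling step used in the 'mixed' branch above: the minority acquisition type is re-drawn with replacement so that both types end up balanced (all numbers are made up for illustration):

import numpy as np

np.random.seed(0)
len_majority, len_minority = 10, 4
ratio_oversampling = float(len_majority) / len_minority        # 2.5

n_img = len_minority                                           # patches available in one minority folder
picked = np.random.choice(len_minority,
                          int(np.ceil(ratio_oversampling * n_img)),
                          replace=True)
print(picked)  # 10 indices into the minority patches, some repeated
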
Example No. 7
def raw_img_to_patches(path_raw_data,
                       path_patched_data,
                       thresh_indices=[0, 0.2, 0.8],
                       patch_size=512,
                       resampling_resolution=0.1):
    """
    Transform a raw acquisition to a folder of patches of size indicated in the arguments. Also performs resampling.
    Note: this function needs to be run as many times as there are different general pixel sizes
    (thus different acquisition types / resolutions).
    :param path_raw_data: Path to where the raw image folders are located.
    :param path_patched_data: Path to where we will store the patched acquisitions.
    :param thresh_indices: List of float, determining the thresholds separating the classes.
    :param patch_size: Int, size of the patches to generate (and consequently input size of the network).
    :param resampling_resolution: Float, the resolution we need to resample to so that each sample
    has the same resolution in a dataset.
    :return: Nothing.
    """

    # If string, convert to Path objects
    path_raw_data = convert_path(path_raw_data)
    path_patched_data = convert_path(path_patched_data)

    # First we define where we are going to store the patched data and we create the directory if it does not exist.
    if not path_patched_data.exists():
        path_patched_data.mkdir(parents=True)

    # Loop over each raw image folder
    img_folder_names = [im.name for im in path_raw_data.iterdir()]
    for img_folder in tqdm(img_folder_names):
        path_img_folder = path_raw_data / img_folder
        if path_img_folder.is_dir():

            # We are now in the image folder.
            # Read the pixel size of the acquisition
            pixel_size = float(
                (path_img_folder / 'pixel_size_in_micrometer.txt').read_text())
            # Coefficient used to resample the image to the target resolution
            resample_coeff = pixel_size / resampling_resolution

            # We go through every file in the image folder
            data_names = [d.name for d in path_img_folder.iterdir()]
            for data in data_names:
                if 'image' in data:  # If it's the raw image.

                    img = ads.imread(path_img_folder / data)
                    img = rescale(img,
                                  resample_coeff,
                                  preserve_range=True,
                                  mode='constant').astype(int)

                elif 'mask' in data:
                    mask_init = ads.imread(path_img_folder / data)
                    mask = rescale(mask_init,
                                   resample_coeff,
                                   preserve_range=True,
                                   mode='constant',
                                   order=0)

                    # Set the mask values to the classes' values
                    mask = labellize_mask_2d(
                        mask, thresh_indices
                    )  # shape (size, size), values float 0.0-1.0

            to_extract = [img, mask]
            patches = extract_patch(to_extract, patch_size)
            # The patch extraction is done, now we put the new patches in the corresponding folders

            # We create it if it does not exist
            path_patched_folder = path_patched_data / img_folder
            if not path_patched_folder.exists():
                path_patched_folder.mkdir(parents=True)

            for j, patch in enumerate(patches):
                ads.imwrite(path_patched_folder.joinpath('image_%s.png' % j),
                            patch[0])
                ads.imwrite(path_patched_folder.joinpath('mask_%s.png' % j),
                            patch[1])
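A self-contained sketch (toy image, assumed pixel size) of the resampling coefficient computed above: a raw acquisition at 0.2 µm/pixel is rescaled to a 0.1 µm/pixel target resolution, doubling its size:

import numpy as np
from skimage.transform import rescale

pixel_size = 0.2                # µm per pixel of the raw acquisition (assumed value)
resampling_resolution = 0.1     # µm per pixel wanted in the dataset
resample_coeff = pixel_size / resampling_resolution   # 2.0

img = np.random.randint(0, 255, (64, 64))
img = rescale(img, resample_coeff, preserve_range=True, mode='constant').astype(int)
print(img.shape)  # (128, 128)
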
Example No. 8
def segment_image(path_testing_image,
                  path_model,
                  overlap_value,
                  config,
                  resolution_model,
                  acquired_resolution=None,
                  verbosity_level=0):
    '''
    Segment the image located at the path_testing_image location.
    :param path_testing_image: the path of the image to segment.
    :param path_model: where to access the model
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param acquired_resolution: the pixel size (in micrometers) of the image to segment; if None, it is read from the
    pixel_size_in_micrometer.txt file located in the image folder.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    # If string, convert to Path objects
    path_testing_image = convert_path(path_testing_image)
    path_model = convert_path(path_model)

    if path_testing_image.exists():

        # Extracting the image name and its folder path from the total path.
        path_parts = path_testing_image.parts
        acquisition_name = Path(path_parts[-1])
        path_acquisition = Path(*path_parts[:-1])

        # Get type of model we are using
        selected_model = path_model.name

        # Read image
        img = ads.imread(str(path_testing_image))

        # Generate tmp file
        fp = open(path_acquisition / '__tmp_segment__.png', 'wb+')

        img_name_original = acquisition_name.stem

        ads.imwrite(fp, img, format='png')

        acquisition_name = Path(fp.name).name
        segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'

        # Performing the segmentation

        axon_segmentation(path_acquisitions_folders=path_acquisition,
                          acquisitions_filenames=[acquisition_name],
                          path_model_folder=path_model,
                          config_dict=config,
                          ckpt_name='model',
                          inference_batch_size=1,
                          overlap_value=overlap_value,
                          segmentations_filenames=segmented_image_name,
                          resampled_resolutions=resolution_model,
                          verbosity_level=verbosity_level,
                          acquired_resolution=acquired_resolution,
                          prediction_proba_activate=False,
                          write_mode=True)

        if verbosity_level >= 1:
            print(("Image {0} segmented.".format(path_testing_image)))

        # Remove temporary file used for the segmentation
        fp.close()
        (path_acquisition / '__tmp_segment__.png').unlink()

    else:
        print(("The path {0} does not exist.".format(path_testing_image)))

    return None
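A small sketch (with a hypothetical path) of the path handling above: the acquisition folder and file name are split apart, and the segmented file name is built from the image stem:

from pathlib import Path

path_testing_image = Path('data/sample1/image.png')    # hypothetical example path
path_acquisition = path_testing_image.parent            # data/sample1
img_name_original = path_testing_image.stem             # 'image'
segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'
print(segmented_image_name)  # image_seg-axonmyelin.png
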
Example No. 9
def segment_folders(path_testing_images_folder,
                    path_model,
                    overlap_value,
                    config,
                    resolution_model,
                    acquired_resolution=None,
                    verbosity_level=0):
    '''
    Segments the images contained in the image folders located in the path_testing_images_folder.
    :param path_testing_images_folder: the folder where all image folders are located (the images to segment are located
    in those image folders)
    :param path_model: where to access the model.
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param acquired_resolution: the pixel size (in micrometers) of the images to segment; if None, it is read from the
    pixel_size_in_micrometer.txt file located in the image folder.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    # If string, convert to Path objects
    path_testing_images_folder = convert_path(path_testing_images_folder)
    path_model = convert_path(path_model)

    # Build the list of images to segment, keeping only image files (excluding already segmented outputs and masks)
    img_files = [
        file for file in path_testing_images_folder.iterdir()
        if (file.suffix.lower() in ('.png', '.jpg', '.jpeg', '.tif', '.tiff'))
        and (not str(file).endswith(('_seg-axonmyelin.png', '_seg-axon.png',
                                     '_seg-myelin.png', 'mask.png')))
    ]

    # Pre-processing: convert to png if not already done and adapt to model contrast
    for file_ in tqdm(img_files, desc="Segmentation..."):
        print(path_testing_images_folder / file_)
        try:
            height, width, _ = ads.imread(
                str(path_testing_images_folder / file_)).shape
        except ValueError:
            # Grayscale image: the shape only has two dimensions
            height, width = ads.imread(
                str(path_testing_images_folder / file_)).shape

        image_size = [height, width]
        minimum_resolution = config[
            "trainingset_patchsize"] * resolution_model / min(image_size)

        if acquired_resolution is not None and acquired_resolution < minimum_resolution:
            print(
                "EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n"
                .format(height, width, acquired_resolution),
                "The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n"
                .format(config["trainingset_patchsize"], resolution_model),
                "One of the dimensions of the image has a size of {0} after resampling to that resolution.\n"
                .format(
                    round(acquired_resolution * min(image_size) /
                          resolution_model)),
                "Image file location: {0}".format(
                    str(path_testing_images_folder / file_)))

            sys.exit(2)

        selected_model = path_model.name

        # Read image for conversion
        img = ads.imread(str(path_testing_images_folder / file_))

        # Generate tmpfile for segmentation pipeline
        fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+')

        img_name_original = file_.stem

        ads.imwrite(fp, img, format='png')

        acquisition_name = Path(fp.name).name
        segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'

        axon_segmentation(path_acquisitions_folders=path_testing_images_folder,
                          acquisitions_filenames=[acquisition_name],
                          path_model_folder=path_model,
                          config_dict=config,
                          ckpt_name='model',
                          inference_batch_size=1,
                          overlap_value=overlap_value,
                          segmentations_filenames=[segmented_image_name],
                          acquired_resolution=acquired_resolution,
                          verbosity_level=verbosity_level,
                          resampled_resolutions=resolution_model,
                          prediction_proba_activate=False,
                          write_mode=True)

        if verbosity_level >= 1:
            tqdm.write("Image {0} segmented.".format(
                str(path_testing_images_folder / file_)))

        # Remove temporary file used for the segmentation
        fp.close()
        (path_testing_images_folder / '__tmp_segment__.png').unlink()

    return None
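A self-contained sketch (all numbers assumed) of the minimum-resolution check above: the acquired pixel size must be large enough that, after resampling to the model resolution, the image still contains at least one full training patch:

trainingset_patchsize = 512     # pixels (assumed config value)
resolution_model = 0.1          # µm/pixel the model was trained on
height, width = 2000, 1500      # assumed acquired image size
image_size = [height, width]

minimum_resolution = trainingset_patchsize * resolution_model / min(image_size)
print(round(minimum_resolution, 4))  # 0.0341: acquired pixel sizes below this are rejected
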
Example No. 10
    def on_compute_morphometrics_button(self, event):
        """
        Compute morphometrics and save them to an Excel file.
        """

        # Get pixel size

        try:
            pixel_size = self.pixel_size_float
        except AttributeError:
            # The pixel size has not been set yet: ask the user for it
            with wx.TextEntryDialog(self,
                                    "Enter the pixel size in micrometer",
                                    value="0.07") as text_entry:
                if text_entry.ShowModal() == wx.ID_CANCEL:
                    return

                pixel_size_str = text_entry.GetValue()
            pixel_size = float(pixel_size_str)

        # Find the visible myelin and axon masks
        axon_mask_overlay = self.get_corrected_axon_overlay()
        if axon_mask_overlay is None:
            axon_mask_overlay = self.get_visible_axon_overlay()
        myelin_mask_overlay = self.get_visible_myelin_overlay()

        if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
            return

        # Store the data of the masks in variables as numpy arrays.
        # Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
        # done.
        # Note 2: the image array loaded in FSLeyes is flipped, so we need to flip it back.

        myelin_array = np.array(myelin_mask_overlay[:, :, 0] *
                                params.intensity['binary'],
                                copy=True,
                                dtype=np.uint8)
        myelin_array = np.flipud(myelin_array)
        myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
        axon_array = np.array(axon_mask_overlay[:, :, 0] *
                              params.intensity['binary'],
                              copy=True,
                              dtype=np.uint8)
        axon_array = np.flipud(axon_array)
        axon_array = np.rot90(axon_array, k=1, axes=(1, 0))

        # Make sure the masks have the same size
        if myelin_array.shape != axon_array.shape:
            self.show_message("invalid visible masks dimensions")
            return

        # Combine the masks into a single prediction image (axon=255, myelin=127)
        pred = (myelin_array // 2 + axon_array).astype(np.uint8)

        pred_axon = pred > 200
        pred_myelin = np.logical_and(pred >= 50, pred <= 200)

        x = np.array([],
                     dtype=[('x0', 'f4'), ('y0', 'f4'), ('gratio', 'f4'),
                            ('axon_area', 'f4'), ('myelin_area', 'f4'),
                            ('axon_diam', 'f4'), ('myelin_thickness', 'f4'),
                            ('axonmyelin_area', 'f4'), ('solidity', 'f4'),
                            ('eccentricity', 'f4'), ('orientation', 'f4')])

        # Compute statistics
        stats_array = get_axon_morphometrics(im_axon=pred_axon,
                                             im_myelin=pred_myelin,
                                             pixel_size=pixel_size)

        for stats in stats_array:

            x = np.append(
                x,
                np.array([(stats['x0'], stats['y0'], stats['gratio'],
                           stats['axon_area'], stats['myelin_area'],
                           stats['axon_diam'], stats['myelin_thickness'],
                           stats['axonmyelin_area'], stats['solidity'],
                           stats['eccentricity'], stats['orientation'])],
                         dtype=x.dtype))

        with wx.FileDialog(self,
                           "Save morphometrics file",
                           wildcard="Excel files (*.xlsx)|*.xlsx",
                           style=wx.FD_SAVE
                           | wx.FD_OVERWRITE_PROMPT) as fileDialog:

            if fileDialog.ShowModal() == wx.ID_CANCEL:
                return  # the user changed their mind

            # save the current contents in the file
            pathname = fileDialog.GetPath()
            # If the user didn't add the extension, add it here
            if not pathname.lower().endswith((".xlsx", ".csv")):
                pathname = pathname + ".xlsx"
            try:
                # Export to excel
                pd.DataFrame(x).to_excel(pathname)

            except IOError:
                wx.LogError("Cannot save current data in file '%s'." %
                            pathname)

        # Create the axon coordinate array
        mean_diameter_in_pixel = np.average(x['axon_diam']) / pixel_size
        axon_indexes = np.arange(x.size)
        number_array = postprocessing.generate_axon_numbers_image(
            axon_indexes, x['x0'], x['y0'], tuple(reversed(axon_array.shape)),
            mean_diameter_in_pixel)

        # Load the axon coordinate image into FSLeyes
        number_outfile = self.ads_temp_dir / "numbers.png"
        ads_utils.imwrite(number_outfile, number_array)
        self.load_png_image_from_path(number_outfile,
                                      is_mask=False,
                                      colormap="yellow")

        return
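A self-contained sketch (made-up values for two axons) of the structured array and Excel export used above; the field names follow the dtype defined in the handler, and writing .xlsx requires an Excel engine such as openpyxl:

import numpy as np
import pandas as pd

dtype = [('x0', 'f4'), ('y0', 'f4'), ('gratio', 'f4'), ('axon_area', 'f4'),
         ('myelin_area', 'f4'), ('axon_diam', 'f4'), ('myelin_thickness', 'f4'),
         ('axonmyelin_area', 'f4'), ('solidity', 'f4'), ('eccentricity', 'f4'),
         ('orientation', 'f4')]

x = np.array([(10.0, 12.0, 0.7, 3.1, 1.4, 2.0, 0.3, 4.5, 0.95, 0.2, 0.0),
              (40.0, 55.0, 0.6, 2.8, 1.6, 1.8, 0.4, 4.4, 0.93, 0.3, 0.1)],
             dtype=dtype)

pd.DataFrame(x).to_excel('morphometrics.xlsx')
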
Example No. 11
    def on_save_segmentation_button(self, event):
        """
        This function saves the active myelin and axon masks as PNG images. Three (3) images are generated in a folder
        selected by the user: one with the axon mask, one with the myelin mask and one with both.
        """

        # Find the visible myelin and axon masks
        axon_mask_overlay = self.get_corrected_axon_overlay()
        if axon_mask_overlay is None:
            axon_mask_overlay = self.get_visible_axon_overlay()
        myelin_mask_overlay = self.get_visible_myelin_overlay()

        if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
            return

        # Ask the user where to save the segmentation
        with wx.DirDialog(
                self,
                "Select the directory in which the segmentation will be saved",
                defaultPath="",
                style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST,
        ) as file_dialog:

            if file_dialog.ShowModal() == wx.ID_CANCEL:
                return

            save_dir = Path(file_dialog.GetPath())

        # Store the data of the masks in variables as numpy arrays.
        # Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
        # done.
        # Note 2: the image array loaded in FSLeyes is flipped, so we need to flip it back.

        myelin_array = np.array(myelin_mask_overlay[:, :, 0],
                                copy=True,
                                dtype=np.uint8)
        myelin_array = np.flipud(myelin_array)
        myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
        axon_array = np.array(axon_mask_overlay[:, :, 0],
                              copy=True,
                              dtype=np.uint8)
        axon_array = np.flipud(axon_array)
        axon_array = np.rot90(axon_array, k=1, axes=(1, 0))

        # Make sure the masks have the same size
        if myelin_array.shape != axon_array.shape:
            self.show_message("invalid visible masks dimensions")
            return

        # Remove the intersection
        myelin_array, axon_array, intersection = postprocessing.remove_intersection(
            myelin_array, axon_array, priority=1, return_overlap=True)

        if intersection.sum() > 0:
            self.show_message(
                "There is an overlap between the axon mask and the myelin mask. The myelin will have priority."
            )

        # Scale the pixel values of the masks to 255 for image saving
        myelin_array = myelin_array * params.intensity['binary']
        axon_array = axon_array * params.intensity['binary']

        # Save the arrays as PNG files
        myelin_and_axon_array = (myelin_array // 2 + axon_array).astype(
            np.uint8)
        ads_utils.imwrite(filename=(save_dir / "ADS_seg.png"),
                          img=myelin_and_axon_array)
        ads_utils.imwrite(filename=save_dir / "ADS_seg-myelin.png",
                          img=myelin_array)
        ads_utils.imwrite(filename=save_dir / "ADS_seg-axon.png",
                          img=axon_array)
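A toy NumPy sketch of what the intersection removal above amounts to (a generic illustration, not the actual postprocessing.remove_intersection implementation): where both masks claim a pixel, the myelin mask keeps it and the axon mask loses it:

import numpy as np

myelin = np.array([[True, True, False]])
axon = np.array([[False, True, True]])

intersection = myelin & axon            # pixels claimed by both masks
axon = axon & ~intersection             # myelin has priority (priority=1)
print(intersection.sum(), axon)         # 1 [[False False  True]]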