Example #1
def result_mapping(folder_models, path_datatest):
    """
    Apply every trained model found in folder_models to the test image.
    Input :
        folder_models : path to the folder containing all the model folders to test.
        path_datatest : path to the folder containing the image to segment.

    Output :
        None. Save the segmented images in the data_test folder.
    """
    model_folders = [f for f in folder_models.iterdir()]
    for root in model_folders:
        if 'DS_Store' not in root.name:
            # iterdir() already yields the full path of each model sub-folder
            subpath_model = root
            filename = 'config_network.json'
            with open(subpath_model / filename, 'r') as fd:
                config_network = json.loads(fd.read())

            axon_segmentation(path_datatest,
                              subpath_model,
                              config_network,
                              segmentations_filenames='segmentation_' + root.name +
                              '.png')

    return 'segmented'
Example #2
def result_mapping(folder_models, path_datatest):
    """
    Apply every trained model found in folder_models to the test image.
    Input :
        folder_models : string : path to the folder containing all the models folders to test.
        path_datatest : string : path to the folder containing the image to segment.

    Output :
        None. Save the segmented images in the data_test folder.
    """

    for root in os.listdir(folder_models)[:]:
        if 'DS_Store' not in root:
            subpath_model = os.path.join(folder_models, root)
            filename = 'config_network.json'
            with open(os.path.join(subpath_model, filename), 'r') as fd:
                config_network = json.loads(fd.read())

            axon_segmentation(path_datatest,
                              subpath_model,
                              config_network,
                              segmentations_filenames='segmentation_' + root +
                              '.png')

    return 'segmented'
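
A minimal usage sketch for result_mapping, assuming a hypothetical `models` folder that contains one sub-folder per trained model and a `data_test` folder holding the image to segment (paths are illustrative, not from the original snippet):

import json
import os

from AxonDeepSeg.apply_model import axon_segmentation

folder_models = 'models'      # hypothetical folder with one sub-folder per trained model
path_datatest = 'data_test'   # hypothetical folder containing the image to segment

result_mapping(folder_models, path_datatest)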
Example #3
def segment_image(path_testing_image,
                  path_model,
                  overlap_value,
                  config,
                  resolution_model,
                  segmented_image_prefix,
                  acquired_resolution=0.0,
                  verbosity_level=0):
    '''
    Segment the image located at the path_testing_image location.
    :param path_testing_image: the path of the image to segment.
    :param path_model: where to access the model
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param segmented_image_prefix: the prefix to add before the segmented image.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    if os.path.exists(path_testing_image):

        # Extracting the image name and its folder path from the total path.
        tmp_path = path_testing_image.split('/')
        acquisition_name = tmp_path[-1]
        path_acquisition = '/'.join(tmp_path[:-1])

        # Performing the segmentation
        segmented_image_name = segmented_image_prefix + acquisition_name
        axon_segmentation(path_acquisitions_folders=path_acquisition,
                          acquisitions_filenames=[acquisition_name],
                          path_model_folder=path_model,
                          config_dict=config,
                          ckpt_name='model',
                          inference_batch_size=1,
                          overlap_value=overlap_value,
                          segmentations_filenames=segmented_image_name,
                          resampled_resolutions=resolution_model,
                          verbosity_level=verbosity_level,
                          acquired_resolution=acquired_resolution,
                          prediction_proba_activate=False,
                          write_mode=True)

        if verbosity_level >= 1:
            print("Image {0} segmented.".format(path_testing_image))

    else:

        print("The path {0} does not exist.".format(path_testing_image))

    return None
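
A minimal usage sketch for this segment_image variant. The paths and parameter values are hypothetical; the config file name and layout follow the other examples on this page:

import json
import os

# Hypothetical model folder; the config is loaded the same way as in the other examples.
path_model = os.path.join('models', 'default_SEM_model_v1')
with open(os.path.join(path_model, 'config_network.json'), 'r') as fd:
    config = json.loads(fd.read())

segment_image(path_testing_image=os.path.join('data_test', 'image.png'),
              path_model=path_model,
              overlap_value=25,
              config=config,
              resolution_model=0.1,
              segmented_image_prefix='segmentation_',
              acquired_resolution=0.1,
              verbosity_level=1)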
Example #4
def segment_image(path_testing_image, path_model,
                  overlap_value, config, resolution_model,
                  acquired_resolution = None, verbosity_level=0):

    '''
    Segment the image located at the path_testing_image location.
    :param path_testing_image: the path of the image to segment.
    :param path_model: where to access the model
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    # If string, convert to Path objects
    path_testing_image = convert_path(path_testing_image)
    path_model = convert_path(path_model)

    if path_testing_image.exists():

        # Extracting the image name and its folder path from the total path.
        path_parts = path_testing_image.parts
        acquisition_name = Path(path_parts[-1])
        path_acquisition = Path(*path_parts[:-1])

        # Get type of model we are using
        selected_model = path_model.name

        img_name_original = acquisition_name.stem

        # Performing the segmentation

        axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name],
                          path_model_folder=path_model, config_dict=config, ckpt_name='model',
                          inference_batch_size=1, overlap_value=overlap_value,
                          resampled_resolutions=resolution_model, verbosity_level=verbosity_level,
                          acquired_resolution=acquired_resolution,
                          prediction_proba_activate=False, write_mode=True)

        if verbosity_level >= 1:
            print("Image {0} segmented.".format(path_testing_image))

    else:
        print("The path {0} does not exist.".format(path_testing_image))

    return None
Example #5
def predict_loop(path_img,
                 modelsname,
                 acquired_resolution=0.004,
                 resampled_resolutions=0.004):
    path_folder, file_name = os.path.split(path_img)
    if not os.path.exists(path_folder + '/prediction/'):
        os.makedirs(path_folder + '/prediction/')
    if os.path.isfile(path_folder + '/prediction/' + file_name.split('.')[0] +
                      '_prediction.png'):
        print('exists, skip')
    else:
        tf.reset_default_graph()
        path_model = os.path.join('models', modelsname)
        path_configfile = os.path.join(path_model, 'config_network.json')
        with open(path_configfile, 'r') as fd:
            config_network = json.loads(fd.read())
        prediction = axon_segmentation(
            path_folder,
            file_name,
            path_model,
            config_network,
            verbosity_level=3,
            resampled_resolutions=resampled_resolutions,
            acquired_resolution=acquired_resolution)
        print(path_folder + '/prediction/' + file_name.split('.')[0] +
              '_prediction.png')
        imsave(
            path_folder + '/prediction/' + file_name.split('.')[0] +
            '_prediction.png', prediction[0])
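
A usage sketch for predict_loop, with a hypothetical image path and a model folder named as in the other examples (the function looks the model up under a local 'models/' directory):

predict_loop(path_img='data_test/image.png',       # hypothetical image to segment
             modelsname='default_SEM_model_v1',     # hypothetical sub-folder of 'models/'
             acquired_resolution=0.004,
             resampled_resolutions=0.004)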
Example #6
def run(data):
	path_testing = data.get('path_testing')

	if data.get('model') == 'SVM':
		model_name = 'default_SEM_model_v1'
	else:
		model_name = 'default_TEM_model_v1'

	image_name = data.get('image_name')

	path_model = os.path.join('./AxonDeepSeg/models', model_name)
	path_configfile = os.path.join(path_model, 'config_network.json')

	if not os.path.exists(path_model):
	    os.makedirs(path_model)

	with open(path_configfile, 'r') as fd:
	    config_network = json.loads(fd.read())

	prediction = axon_segmentation(path_testing, image_name, path_model, config_network,verbosity_level=0)

	print("all done no errors")
Example #7
def segment_folders(path_testing_images_folder, path_model,
                    overlap_value, config, resolution_model, segmented_image_suffix,
                    acquired_resolution = 0.0,
                    verbosity_level=0):
    '''
    Segments the images contained in the image folders located in the path_testing_images_folder.
    :param path_testing_images_folder: the folder where all image folders are located (the images to segment are located
    in those image folders)
    :param path_model: where to access the model.
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param segmented_image_suffix: the suffix to append to the name of the segmented image.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    # We loop over all image folders in the specified folded and we segment them one by one.

    # We loop through every file in the folder as we look for an image to segment
    for file_ in tqdm(os.listdir(path_testing_images_folder), desc="Segmentation..."):

        # We segment the image only if it's not already a segmentation.
        len_suffix = len(segmented_image_suffix)+4 # +4 for ".png"
        if (file_[-4:] == ".png") and (not (file_[-len_suffix:] == (segmented_image_suffix+'.png'))):

            # Performing the segmentation
            basename = file_.split('.')
            basename.pop() # We remove the extension.
            basename = ".".join(basename)
            segmented_image_name = basename + segmented_image_suffix + '.png'
            axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[file_],
                              path_model_folder=path_model, config_dict=config, ckpt_name='model',
                              inference_batch_size=1, overlap_value=overlap_value,
                              segmentations_filenames=[segmented_image_name],
                              acquired_resolution=acquired_resolution,
                              verbosity_level=verbosity_level,
                              resampled_resolutions=resolution_model, prediction_proba_activate=False,
                              write_mode=True)

            if verbosity_level >= 1:
                print("Image {0} segmented.".format(os.path.join(path_testing_images_folder, file_)))
    # The segmentation has been done for this image folder, we go to the next one.

    return None
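
A usage sketch for this segment_folders variant. Paths and values are hypothetical; the config is loaded the same way as in the other examples on this page:

import json
import os

path_model = os.path.join('models', 'default_SEM_model_v1')  # hypothetical model folder
with open(os.path.join(path_model, 'config_network.json'), 'r') as fd:
    config = json.loads(fd.read())

segment_folders(path_testing_images_folder='data_test',
                path_model=path_model,
                overlap_value=25,
                config=config,
                resolution_model=0.1,
                segmented_image_suffix='_segmented',
                acquired_resolution=0.1,
                verbosity_level=1)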
Example #8
def integrity_test():

    try:

        # get path of directory where AxonDeepSeg was installed
        dir_path = Path(__file__).resolve().parent

        # input parameters

        path = Path('folder_name') / 'file_name'
        path_testing = dir_path / 'data_test'
        model_name = 'default_SEM_model'
        path_model = dir_path / 'models' / model_name
        path_configfile = path_model / 'config_network.json'

        # Read the configuration file 
        print('Reading test configuration file.')
        if not path_model.exists():
            path_model.mkdir(parents=True)

        with open(path_configfile, 'r') as fd:
            config_network = json.loads(fd.read())

        # Launch the axon and myelin segmentation on test image sample provided in the installation
        print('Computing the segmentation of axon and myelin on test image.')
        prediction = axon_segmentation([path_testing], ["image.png"], path_model, config_network, prediction_proba_activate=True, verbosity_level=4)

        # Read the ground truth mask and the obtained segmentation mask
        mask = ads.imread(path_testing / 'mask.png')
        pred = ads.imread(path_testing / 'AxonDeepSeg.png')

        # Generate separate axon and myelin masks of the segmentation output
        print('Generating axon and myelin segmentation masks and saving.')
        gt_axon = mask > 200 # Generate binary image with the axons for the ground truth (myelin=127, axon=255)
        gt_myelin = np.logical_and(mask >= 50, mask <= 200) # Generate binary image with the myelin for the ground truth (myelin=127, axon=255)

        pred_axon = pred > 200 # Generate binary image with the axons for the segmentation (myelin=127, axon=255)
        pred_myelin = np.logical_and(pred >= 50, pred <= 200) # Generate binary image with the myelin for the segmentation (myelin=127, axon=255)

        # Compute Dice between segmentation and ground truth, for both axon and myelin
        dice_axon = pw_dice(pred_axon, gt_axon)
        dice_myelin = pw_dice(pred_myelin, gt_myelin)

        # If all the commands above are executed without bugs, the installation is done correctly
        print("* * * Integrity test passed. AxonDeepSeg is correctly installed. * * * ")
        return 0

    except IOError:

        # Else, there is a problem in the installation
        print("Integrity test failed... ")
        return -1
Example #9
def map_model_to_images(folder_model,
                        path_datatests,
                        batch_size=1,
                        gps=0.1,
                        crop_value=25,
                        gpu_per=1.0):
    """
    Apply one trained model to all the specified images
    """

    # Load config
    with open(os.path.join(folder_model, 'config_network.json'), 'r') as fd:
        config_network = json.loads(fd.read())

    path_images = [
        os.path.join(path_datatests, e) for e in os.listdir(path_datatests)[:]
        if os.path.isdir(os.path.join(path_datatests, e))
    ]
    n_images = len(path_images)
    path_images_list = list(segment_list(path_images, 20))

    if type(gps) != list:
        gps = n_images * [gps]
    gps_list = list(segment_list(gps, 20))

    for i, path_images_iter in enumerate(path_images_list):
        gps_iter = gps_list[i]
        axon_segmentation(path_images_iter,
                          folder_model,
                          config_network,
                          segmentations_filenames='segmentation.png',
                          inference_batch_size=batch_size,
                          write_mode=True,
                          prediction_proba_activate=False,
                          resampled_resolutions=gps_iter,
                          overlap_value=crop_value,
                          gpu_per=gpu_per)
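
A usage sketch for map_model_to_images with hypothetical folders; path_datatests is expected to contain one sub-folder per image to segment:

import os

map_model_to_images(folder_model=os.path.join('models', 'default_SEM_model_v1'),  # hypothetical
                    path_datatests='data_test',   # hypothetical folder of image sub-folders
                    batch_size=1,
                    gps=0.1,                      # resampling resolution(s), as in the default above
                    crop_value=25)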
Example #10
    path_testing = "../AxonDeepSeg/data_test/"
    model_name = 'default_SEM_model_v1'
    path_model = '../AxonDeepSeg/models/' + model_name

    path_configfile = path_model + '/config_network.json'

    if not os.path.exists(path_model):
        os.makedirs(path_model)

    with open(path_configfile, 'r') as fd:
        config_network = json.loads(fd.read())

    from AxonDeepSeg.apply_model import axon_segmentation

    prediction = axon_segmentation([path_testing], ["image.png"],
                                   path_model,
                                   config_network,
                                   verbosity_level=0)

    mask = imread(path_testing + '/mask.png', flatten=True)
    pred = imread(path_testing + '/AxonDeepSeg.png', flatten=True)

    gt_axon = mask > 200
    gt_myelin = np.logical_and(mask >= 50, mask <= 200)

    pred_axon = pred > 200
    pred_myelin = np.logical_and(pred >= 50, pred <= 200)

    dice_axon = pw_dice(pred_axon, gt_axon)
    dice_myelin = pw_dice(pred_myelin, gt_myelin)

    print "* * * Integrity test passed. AxonDeepSeg is correctly installed. * * * "
Example #11
def segment_image(path_testing_image,
                  path_model,
                  overlap_value,
                  config,
                  resolution_model,
                  segmented_image_prefix,
                  acquired_resolution=0.0,
                  verbosity_level=0):
    '''
    Segment the image located at the path_testing_image location.
    :param path_testing_image: the path of the image to segment.
    :param path_model: where to access the model
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param segmented_image_prefix: the prefix to add before the segmented image.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    if os.path.exists(path_testing_image):

        # Extracting the image name and its folder path from the total path.
        tmp_path = path_testing_image.split('/')
        acquisition_name = tmp_path[-1]
        path_acquisition = '/'.join(tmp_path[:-1])

        # Get type of model we are using
        tmp_path, selected_model = os.path.split(path_model)

        # Read image
        img = imageio.imread(os.path.join(path_acquisition, acquisition_name))

        # Generate tmp file
        fp = tempfile.NamedTemporaryFile(dir=path_acquisition,
                                         suffix='.png',
                                         mode='wb')

        img_name_original, file_extension = os.path.splitext(acquisition_name)

        if selected_model == "default_TEM_model_v1":
            imageio.imwrite(fp, 255 - img, format='png')
        else:
            imageio.imwrite(fp, img, format='png')

        tmp_path, tmp_name = os.path.split(fp.name)
        acquisition_name = tmp_name
        segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'

        # Performing the segmentation

        axon_segmentation(path_acquisitions_folders=path_acquisition,
                          acquisitions_filenames=[acquisition_name],
                          path_model_folder=path_model,
                          config_dict=config,
                          ckpt_name='model',
                          inference_batch_size=1,
                          overlap_value=overlap_value,
                          segmentations_filenames=segmented_image_name,
                          resampled_resolutions=resolution_model,
                          verbosity_level=verbosity_level,
                          acquired_resolution=acquired_resolution,
                          prediction_proba_activate=False,
                          write_mode=True)

        if verbosity_level >= 1:
            print("Image {0} segmented.".format(path_testing_image))

        # Remove temporary file used for the segmentation
        fp.close()

    else:
        print("The path {0} does not exist.".format(path_testing_image))

    return None
Example #12
def segment_folders(path_testing_images_folder,
                    path_model,
                    overlap_value,
                    config,
                    resolution_model,
                    segmented_image_suffix,
                    acquired_resolution=0.0,
                    verbosity_level=0):
    '''
    Segments the images contained in the image folders located in the path_testing_images_folder.
    :param path_testing_images_folder: the folder where all image folders are located (the images to segment are located
    in those image folders)
    :param path_model: where to access the model.
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param segmented_image_suffix: the suffix to append to the name of the segmented image.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    # Update list of images to segment by selecting only image files (not already segmented or not masks)
    img_files = [
        file for file in os.listdir(path_testing_images_folder)
        if (file.endswith(('.png', '.jpg', '.jpeg', '.tif', '.tiff')) and (
            not file.endswith(('_seg-axonmyelin.png', '_seg-axon.png',
                               '_seg-myelin.png', 'mask.png'))))
    ]

    # Pre-processing: convert to png if not already done and adapt to model contrast
    for file_ in tqdm(img_files, desc="Segmentation..."):

        tmp_path, selected_model = os.path.split(path_model)

        # Read image for conversion
        img = imageio.imread(os.path.join(path_testing_images_folder, file_))

        # Generate tmpfile for segmentation pipeline
        fp = tempfile.NamedTemporaryFile(dir=path_testing_images_folder,
                                         suffix='.png',
                                         mode='wb')

        img_name_original, file_extension = os.path.splitext(file_)

        if selected_model == "default_TEM_model_v1":
            imageio.imwrite(fp, 255 - img, format='png')
        else:
            imageio.imwrite(fp, img, format='png')

        tmp_path, tmp_name = os.path.split(fp.name)
        acquisition_name = tmp_name
        segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'

        axon_segmentation(path_acquisitions_folders=path_testing_images_folder,
                          acquisitions_filenames=[acquisition_name],
                          path_model_folder=path_model,
                          config_dict=config,
                          ckpt_name='model',
                          inference_batch_size=1,
                          overlap_value=overlap_value,
                          segmentations_filenames=[segmented_image_name],
                          acquired_resolution=acquired_resolution,
                          verbosity_level=verbosity_level,
                          resampled_resolutions=resolution_model,
                          prediction_proba_activate=False,
                          write_mode=True)

        if verbosity_level >= 1:
            tqdm.write("Image {0} segmented.".format(
                os.path.join(path_testing_images_folder, file_)))

        # Remove temporary file used for the segmentation
        fp.close()

    return None
Example #13
def generate_statistics(path_model_folder,
                        path_images_folder,
                        resampled_resolution,
                        overlap_value,
                        verbosity_level=0):
    """
    Generates the implemented statistics for all the checkpoints of a given model, for each requested image.
    :param path_model_folder: Path to the model to use.
    :param path_images_folder: Path to the folders that contain the images to compute the metrics on.
    :param resampled_resolution: Float, the resolution to resample to before making the predictions.
    :param overlap_value: Int, the number of pixels to use for overlap.
    :param verbosity_level: Int. The higher, the more displayed information.
    :return:
    """

    model_statistics_dict = {"date": time.strftime("%Y-%m-%d"), "data": {}}
    # First we load the network parameters from the config file
    with open(os.path.join(path_model_folder, 'config_network.json'),
              'r') as fd:
        config_network = json.loads(fd.read())

    n_classes = config_network['n_classes']
    model_name = path_model_folder.split("/")[-2]  # Extraction of the name of the model.

    # We loop over all checkpoint files to compute statistics for each checkpoint.
    for checkpoint in os.listdir(path_model_folder):

        if checkpoint[-10:] == '.ckpt.meta':

            result_model = {}
            name_checkpoint = checkpoint[:-10]

            result_model.update({
                'id_model': model_name,
                'ckpt': name_checkpoint,
                'config': config_network
            })

            # 1/ We load the saved training statistics, which are independent of the testing images

            try:
                # Pickle files must be opened in binary mode.
                f = open(path_model_folder + '/' + name_checkpoint + '.pkl',
                         'rb')
                res = pickle.load(f)
                acc_stats = res['accuracy']
                loss_stats = res['loss']
                epoch_stats = res['steps']

            except:
                print('No stats file found...')
                #f = open(path_model_folder + '/evolution.pkl', 'r')
                #res = pickle.load(f)
                #epoch_stats = max(res['steps'])
                #acc_stats = np.mean(res['accuracy'][-10:])
                #loss_stats = np.mean(res['loss'][-10:])

                epoch_stats = None
                acc_stats = None
                loss_stats = None

            result_model.update({
                'training_stats': {
                    'training_epoch': epoch_stats,
                    'training_mvg_avg10_acc': acc_stats,
                    'training_mvg_avg10_loss': loss_stats
                },
                'testing_stats': {}
            })

            # 2/ Computation of the predictions / outputs of the network for each image at the same time.

            predictions, outputs_network = axon_segmentation(
                path_images_folder, ['image.png'] * len(path_images_folder),
                path_model_folder,
                config_network,
                ckpt_name=name_checkpoint,
                overlap_value=overlap_value,
                resampled_resolutions=[resampled_resolution] *
                len(path_images_folder),
                prediction_proba_activate=True,
                write_mode=False,
                gpu_per=1.0,
                verbosity_level=verbosity_level)
            # These two variables are list, as long as the number of images that are tested.

            if verbosity_level >= 2:
                print('Statistics extraction...')

            # 3/ Computation of the statistics for each image.
            for i, image_folder in tqdm(enumerate(path_images_folder)):

                current_prediction = predictions[i]
                current_network_output = outputs_network[i]

                # Reading the images and processing them
                mask_raw = imread(os.path.join(image_folder, 'mask.png'),
                                  flatten=True,
                                  mode='L')
                mask = labellize(mask_raw)

                # We infer the name of the different files
                name_image = image_folder.split('/')[-1]

                # Computing metrics and storing them in the json file.
                current_proba = output_network_to_proba(
                    current_network_output, n_classes)
                testing_stats_dict = compute_metrics(current_prediction,
                                                     current_proba, mask,
                                                     n_classes)
                result_model['testing_stats'].update(
                    {name_image: testing_stats_dict})

            # We add the metrics for all the checkpoints from this model (on all images) to the data list.
            model_statistics_dict["data"].update(
                {name_checkpoint: result_model})

    return model_statistics_dict
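
A usage sketch for generate_statistics with hypothetical model and image folders; path_images_folder is a list of folders, each expected to contain an image.png and a mask.png:

model_statistics = generate_statistics(
    path_model_folder='models/default_SEM_model_v1/',                 # hypothetical model folder
    path_images_folder=['data_test/sample_1', 'data_test/sample_2'],  # hypothetical image folders
    resampled_resolution=0.1,
    overlap_value=25,
    verbosity_level=1)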
Example #14
def segment_image(path_testing_image, path_model,
                  overlap_value, config, resolution_model,
                  acquired_resolution = None, verbosity_level=0):

    '''
    Segment the image located at the path_testing_image location.
    :param path_testing_image: the path of the image to segment.
    :param path_model: where to access the model
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    # If string, convert to Path objects
    path_testing_image = convert_path(path_testing_image)
    path_model = convert_path(path_model)

    if path_testing_image.exists():

        # Extracting the image name and its folder path from the total path.
        path_parts = path_testing_image.parts
        acquisition_name = Path(path_parts[-1])
        path_acquisition = Path(*path_parts[:-1])

        # Get type of model we are using
        selected_model = path_model.name

        # Read image
        img = imageio.imread(str(path_testing_image))

        # Generate tmp file
        fp = open(path_acquisition / '__tmp_segment__.png', 'wb+')

        img_name_original = acquisition_name.stem

        if selected_model == "default_TEM_model_v1":
            imageio.imwrite(fp,255-img, format='png')
        else:
            imageio.imwrite(fp, img, format='png')

        acquisition_name = Path(fp.name).name
        segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'

        # Performing the segmentation

        axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name],
                          path_model_folder=path_model, config_dict=config, ckpt_name='model',
                          inference_batch_size=1, overlap_value=overlap_value,
                          segmentations_filenames=segmented_image_name,
                          resampled_resolutions=resolution_model, verbosity_level=verbosity_level,
                          acquired_resolution=acquired_resolution,
                          prediction_proba_activate=False, write_mode=True)

        if verbosity_level >= 1:
            print("Image {0} segmented.".format(path_testing_image))

        # Remove temporary file used for the segmentation
        fp.close()
        (path_acquisition / '__tmp_segment__.png').unlink()

    else:
        print(("The path {0} does not exist.".format(path_testing_image)))

    return None
Example #15
def segment_folders(path_testing_images_folder, path_model,
                    overlap_value, config, resolution_model,
                    acquired_resolution = None,
                    verbosity_level=0):
    '''
    Segments the images contained in the image folders located in the path_testing_images_folder.
    :param path_testing_images_folder: the folder where all image folders are located (the images to segment are located
    in those image folders)
    :param path_model: where to access the model.
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less
    border effects but more time to perform the segmentation.
    :param config: dict containing the configuration of the network
    :param resolution_model: the resolution the model was trained on.
    :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
    process.
    :return: Nothing.
    '''

    # If string, convert to Path objects
    path_testing_images_folder = convert_path(path_testing_images_folder)
    path_model = convert_path(path_model)

    # Update list of images to segment by selecting only image files (not already segmented or not masks)
    img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff'))
                 and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))]

    # Pre-processing: convert to png if not already done and adapt to model contrast
    for file_ in tqdm(img_files, desc="Segmentation..."):
        print(path_testing_images_folder / file_)
        try:
            height, width, _ = imageio.imread(str(path_testing_images_folder / file_)).shape
        except:
            try:
                height, width = imageio.imread(str(path_testing_images_folder / file_)).shape
            except Exception as e:
                raise e

        image_size = [height, width]
        minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size)

        if acquired_resolution < minimum_resolution:
            print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, acquired_resolution),
                  "The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model),
                  "One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(acquired_resolution * min(image_size) / resolution_model)),
                  "Image file location: {0}".format(str(path_testing_images_folder / file_))
            )

            sys.exit(2)

        selected_model = path_model.name

        # Read image for conversion
        img = imageio.imread(str(path_testing_images_folder / file_))

        # Generate tmpfile for segmentation pipeline
        fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+')

        img_name_original = file_.stem

        if selected_model == "default_TEM_model_v1":
            imageio.imwrite(fp,255-img, format='png')
        else:
            imageio.imwrite(fp,img, format='png')

        acquisition_name = Path(fp.name).name
        segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'

        axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name],
                              path_model_folder=path_model, config_dict=config, ckpt_name='model',
                              inference_batch_size=1, overlap_value=overlap_value,
                              segmentations_filenames=[segmented_image_name],
                              acquired_resolution=acquired_resolution,
                              verbosity_level=verbosity_level,
                              resampled_resolutions=resolution_model, prediction_proba_activate=False,
                              write_mode=True)

        if verbosity_level >= 1:
            tqdm.write("Image {0} segmented.".format(str(path_testing_images_folder / file_)))

        # Remove temporary file used for the segmentation
        fp.close()
        (path_testing_images_folder / '__tmp_segment__.png').unlink()

    return None
Example #16
def integrity_test():

    try:

        import json
        import os
        import numpy as np
        from AxonDeepSeg.testing.segmentation_scoring import *
        from time import time
        from AxonDeepSeg.apply_model import axon_segmentation
        from scipy.misc import imread, imsave

        # get path of directory where AxonDeepSeg was installed
        dir_path = os.path.dirname(os.path.abspath(__file__))

        # input parameters

        path = os.path.join('folder_name', 'file_name')
        path_testing = os.path.join(dir_path, 'data_test')
        model_name = 'default_SEM_model_v1'
        path_model = os.path.join(dir_path, 'models', model_name)
        path_configfile = os.path.join(path_model, 'config_network.json')

        # Read the configuration file
        print('Reading test configuration file.')
        if not os.path.exists(path_model):
            os.makedirs(path_model)

        with open(path_configfile, 'r') as fd:
            config_network = json.loads(fd.read())

        # Launch the axon and myelin segmentation on test image sample provided in the installation
        print('Computing the segmentation of axon and myelin on test image.')
        prediction = axon_segmentation([path_testing], ["image.png"],
                                       path_model,
                                       config_network,
                                       verbosity_level=0)

        # Read the ground truth mask and the obtained segmentation mask
        mask = imread(path_testing + '/mask.png', flatten=True)
        pred = imread(path_testing + '/AxonDeepSeg.png', flatten=True)

        # Generate separate axon and myelin masks of the segmentation output
        print('Generating axon and myelin segmentation masks and saving.')
        gt_axon = mask > 200  # Generate binary image with the axons for the ground truth (myelin=127, axon=255)
        gt_myelin = np.logical_and(
            mask >= 50, mask <= 200
        )  # Generate binary image with the myelin for the ground truth (myelin=127, axon=255)

        pred_axon = pred > 200  # Generate binary image with the axons for the segmentation (myelin=127, axon=255)
        pred_myelin = np.logical_and(
            pred >= 50, pred <= 200
        )  # Generate binary image with the myelin for the segmentation (myelin=127, axon=255)

        # Compute Dice between segmentation and ground truth, for both axon and myelin
        dice_axon = pw_dice(pred_axon, gt_axon)
        dice_myelin = pw_dice(pred_myelin, gt_myelin)

        # If all the commands above are executed without bugs, the installation is done correctly
        print(
            "* * * Integrity test passed. AxonDeepSeg is correctly installed. * * * "
        )
        return 0

    except IOError:

        # Else, there is a problem in the installation
        print("Integrity test failed... ")
        return -1
Example #17
# $ cd AxonSegmentation/AxonDeepSeg
# $ python learn_model.py -p path_bireli_training -m path_bireli_model_new -lr 0.0005
# or
# $ python learn_model.py -p path_bireli_training -m path_bireli_model_new -m_init path_bireli_model_init  -lr 0.0005

#- In a new window to visualize the training performances
# $ scp -r  path_bireli_model_new path_model_new


#----------------------Training the MRF from the paths_training---------------------#
from AxonDeepSeg.mrf import learn_mrf
learn_mrf(paths_training, path_mrf)

#----------------------Axon segmentation with a trained model and trained mrf---------------------#
from AxonDeepSeg.apply_model import axon_segmentation
axon_segmentation(path_my_data, path_model, path_mrf)

#----------------------Myelin segmentation from Axon segmentation--------------------#
from AxonDeepSeg.apply_model import myelin
myelin(path_my_data)

#----------------------Axon and Myelin segmentation--------------------#
from AxonDeepSeg.apply_model import pipeline
pipeline(path_my_data, path_model, path_mrf)

#----------------------Visualization of the results--------------------#
from AxonDeepSeg.evaluation.visualization import visualize_results
visualize_results(path_my_data)