def find_set_of_labels(image_list, field, output_key):
    label_set = set()
    for idx, image in enumerate(image_list):
        assert field in image, \
            "label normalisation layer requires {} input, " \
            "but it is not provided in the config file.\n" \
            "Please consider setting " \
            "label_normalisation to False.".format(field)
        print_progress_bar(idx, len(image_list),
                           prefix='searching unique labels from training files',
                           decimals=1, length=10, fill='*')
        unique_label = np.unique(image[field].get_data())
        if len(unique_label) > 500 or len(unique_label) <= 1:
            tf.logging.warning(
                'unusual discrete values: %s unique '
                'labels to normalise', len(unique_label))
        label_set.update(set(unique_label))
    label_set = list(label_set)
    label_set.sort()
    try:
        mapping_from_to = dict()
        mapping_from_to[output_key[0]] = tuple(label_set)
        mapping_from_to[output_key[1]] = tuple(range(0, len(label_set)))
    except (IndexError, ValueError):
        tf.logging.fatal("unable to create mappings keys: %s, image name %s",
                         output_key, field)
        raise
    return mapping_from_to
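For reference, the returned value pairs the sorted unique labels with consecutive integers. A minimal sketch of that contract, computed directly with NumPy instead of going through the image objects (the sample labels and the key names 'label_from'/'label_to' are illustrative, not from the source):

import numpy as np

# labels gathered across two hypothetical subjects
subject_labels = [np.array([0, 2, 2, 5]), np.array([0, 5, 7])]
label_set = sorted(set(np.concatenate(subject_labels).tolist()))

mapping_from_to = {'label_from': tuple(label_set),
                   'label_to': tuple(range(len(label_set)))}
# mapping_from_to == {'label_from': (0, 2, 5, 7), 'label_to': (0, 1, 2, 3)}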
Example #2
def _filename_to_image_list(file_list, mod_dict, data_param):
    """
    Converting a list of filenames to a list of image objects,
    Properties (e.g. interp_order) are added to each object
    """
    volume_list = []
    for idx in range(len(file_list)):
        print_progress_bar(idx, len(file_list),
                           prefix='reading datasets headers',
                           decimals=1, length=10, fill='*')
        # combine fieldnames and volumes as a dictionary
        _dict = {}
        for field, modalities in mod_dict.items():
            image_instance = _create_image(
                file_list, idx, modalities, data_param)
            if image_instance is not None:
                _dict[field] = image_instance
        if _dict:
            volume_list.append(_dict)
    if not volume_list:
        tf.logging.fatal(
            "Empty filename lists, please check the csv files. "
            "(Remove the csv_file keyword from the config file to "
            "search the folders and generate new csv files "
            "automatically.)\n\n"
            "Please note that in the matched file names, each subject id is "
            "created by removing all keywords listed in `filename_contains` "
            "in the config.\n\n"
            "E.g., `filename_contains=foo, bar` will match the file "
            "foo_subject42_bar.nii.gz, and the subject id is _subject42_.")
        raise IOError
    return volume_list
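The subject-id rule quoted in the error message can be demonstrated directly. The helper below is a hypothetical sketch, not the library's actual matching code; it only reproduces the documented behaviour of stripping the `filename_contains` keywords from a matched file name:

def subject_id_sketch(filename, filename_contains):
    """Illustrative only: strip the extension, then remove each keyword."""
    name = filename
    for ext in ('.nii.gz', '.nii'):  # assumed extension handling
        if name.endswith(ext):
            name = name[:-len(ext)]
            break
    for keyword in filename_contains:
        name = name.replace(keyword, '')
    return name

print(subject_id_sketch('foo_subject42_bar.nii.gz', ['foo', 'bar']))
# prints: _subject42_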
Example #4
def _filename_to_image_list(file_list, mod_dict, data_param):
    """
    Converting a list of filenames to a list of image objects,
    Properties (e.g. interp_order) are added to each object
    """
    volume_list = []
    for idx in range(len(file_list)):
        # create image instance for each subject
        print_progress_bar(idx, len(file_list),
                           prefix='reading datasets headers',
                           decimals=1, length=10, fill='*')

        # combine fieldnames and volumes as a dictionary
        _dict = {}
        for field, modalities in mod_dict.items():
            _dict[field] = _create_image(file_list, idx, modalities, data_param)

        # skip the subject if there are missing image components
        if _dict and None not in list(_dict.values()):
            volume_list.append(_dict)

    if not volume_list:
        tf.logging.fatal(
            "Empty filename lists, please check the csv files. "
            "(Remove the csv_file keyword from the config file to "
            "search the folders and generate new csv files "
            "automatically.)\n\n"
            "Please note that in the matched file names, each subject id is "
            "created by removing all keywords listed in `filename_contains` "
            "in the config.\n\n"
            "E.g., `filename_contains=foo, bar` will match the file "
            "foo_subject42_bar.nii.gz, and the subject id is _subject42_.")
        raise IOError
    return volume_list
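The two _filename_to_image_list variants above treat missing images differently: Example #2 drops only the missing field and keeps the subject, while Example #4 drops the whole subject as soon as any field is missing. A minimal sketch of the two filters on illustrative data:

subjects = [{'image': 'img_0', 'label': None},    # subject missing its label
            {'image': 'img_1', 'label': 'lab_1'}]

# Example #2 style: keep the subject, drop only the missing field
kept_fields = [{k: v for k, v in s.items() if v is not None}
               for s in subjects]
kept_fields = [d for d in kept_fields if d]
# -> [{'image': 'img_0'}, {'image': 'img_1', 'label': 'lab_1'}]

# Example #4 style: drop the whole subject if any field is missing
kept_subjects = [s for s in subjects if s and None not in s.values()]
# -> [{'image': 'img_1', 'label': 'lab_1'}]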
Example #5
def progress_bar_wrapper(count, block_size, total_size):
    """
    Uses the common progress bar in the urlretrieve hook format
    """
    if block_size * 5 >= total_size:
        # no progress bar for tiny files
        return
    print_progress_bar(
        iteration=count,
        total=math.ceil(float(total_size) / float(block_size)),
        prefix="Downloading (total: %.2f M): " % (total_size * 1.0 / 1e6))
Example #7
def create_mapping_from_multimod_arrayfiles(array_files, field, modalities,
                                            mod_to_train, cutoff,
                                            masking_function):
    """
    Performs the mapping creation based on a list of files. For each of the
    files (potentially multimodal), the landmarks are defined for each
    modality and stored in a database. The average of these landmarks is
    returned providing the landmarks to use for the linear mapping of any
    new incoming data

    :param array_files: List of image files to use
    :param modalities: Name of the modalities used for the
        standardisation and the corresponding order in the multimodal files
    :param cutoff: Minimum and maximum landmarks percentile values to use for
        the mapping
    :param masking_function: Describes how the mask is defined for each image.
    :return:
    """
    perc_database = {}
    for (i, p) in enumerate(array_files):
        print_progress_bar(i,
                           len(array_files),
                           prefix='normalisation histogram training',
                           decimals=1,
                           length=10,
                           fill='*')
        img_data = p[field].get_data()
        assert img_data.shape[4] == len(modalities), \
            "number of modalities is not consistent with the input image"
        for mod_i, m in enumerate(modalities):
            if m not in mod_to_train:
                continue
            if m not in perc_database:
                perc_database[m] = []
            for t in range(img_data.shape[3]):
                img_3d = img_data[..., t, mod_i]
                if masking_function is not None:
                    mask_3d = masking_function(img_3d)
                else:
                    mask_3d = np.ones_like(img_3d, dtype=bool)
                perc = __compute_percentiles(img_3d, mask_3d, cutoff)
                perc_database[m].append(perc)
    mapping = {}
    for m in list(perc_database):
        perc_database[m] = np.vstack(perc_database[m])
        s1, s2 = create_standard_range()
        mapping[m] = tuple(__averaged_mapping(perc_database[m], s1, s2))
    return mapping
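__compute_percentiles is module-private and not shown here, but the landmark idea can be sketched: intensity percentiles of the masked voxels, taken at the cutoff bounds plus fixed interior points. The percentile grid below is an assumption for illustration; the library's actual grid may differ:

import numpy as np

def percentile_landmarks_sketch(img_3d, mask_3d, cutoff=(0.01, 0.99)):
    """Illustrative only: landmark percentiles of masked intensities."""
    foreground = img_3d[mask_3d > 0]
    # cutoff bounds plus the deciles in between, as percentages
    percentiles = ([cutoff[0] * 100]
                   + list(range(10, 100, 10))
                   + [cutoff[1] * 100])
    return np.percentile(foreground, percentiles)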
Example #9
def _filename_to_image_list(file_list, mod_dict, data_param):
    """
    converting a list of filenames to a list of image objects
    useful properties (e.g. interp_order) are added to each object
    """
    volume_list = []
    for idx in range(len(file_list)):
        print_progress_bar(idx,
                           len(file_list),
                           prefix='reading datasets headers',
                           decimals=1,
                           length=10,
                           fill='*')
        # combine fieldnames and volumes as a dictionary
        # (note: unlike the variants above, missing images are kept here
        # as None values rather than being filtered out)
        _dict = {
            field: _create_image(file_list, idx, modalities, data_param)
            for (field, modalities) in mod_dict.items()
        }
        volume_list.append(_dict)
    return volume_list