Code example #1
def write_fom(args, direction_image, output_path_name) -> None:
    logger = get_logger(__name__)
    if args['inclination'] is not None:
        inclination_image = read_inclination(args)
    else:
        inclination_image = numpy.zeros_like(direction_image)

    output_data_type = '.' + args['output_type']
    if output_data_type not in ['.h5', '.tiff', '.tif']:
        logger.error('Output data type is not supported. Please choose a valid '
                     'datatype!')
        exit(1)
    saturation = None
    value = None
    if args['saturation']:
        saturation = SLIX.io.imread(args['saturation'])
    if args['value']:
        value = SLIX.io.imread(args['value'])
    rgb_fom = SLIX.visualization.direction(direction_image, inclination_image, saturation, value,
                                           available_colormaps[args['colormap']], args['direction_offset'])
    SLIX.io.imwrite_rgb(f"{output_path_name}fom_{args['colormap']}{output_data_type}", rgb_fom)
    if not args['disable_colorbubble']:
        SLIX.io.imwrite_rgb(f"{args['output']}/color_bubble_{args['colormap']}.tiff",
                            SLIX.visualization.color_bubble(available_colormaps[args['colormap']],
                                                            args['direction_offset']))
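
The keys of the args dictionary above are inferred from the lookups inside the function. As a rough, hypothetical sketch (not part of the SLIX sources), a call could look like the following, with placeholder values for every key:

import numpy

example_args = {
    'inclination': None,           # no separate inclination image
    'output_type': 'tiff',         # must map to .h5, .tiff or .tif
    'saturation': None,
    'value': None,
    'colormap': 'rgb',             # assumed key of available_colormaps
    'direction_offset': 0,
    'disable_colorbubble': True,   # skip writing the color bubble
    'output': 'out',               # placeholder output directory
}
direction = numpy.zeros((128, 128), dtype=float)  # placeholder direction map
write_fom(example_args, direction, 'out/example_')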
Code example #2
def main():
    logger = get_logger("SLIXLineplotParameterGenerator")
    parser = create_argument_parser()
    arguments = parser.parse_args()
    args = vars(arguments)

    paths = args['input']
    if not isinstance(paths, list):
        paths = [paths]

    if not SLIX.io.check_output_dir(args['output']):
        exit(1)

    algorithm = ""
    first_val = -1
    second_val = -1
    if args['smoothing']:
        algorithm = args['smoothing'][0]
        if algorithm == "fourier":
            first_val = 0.25
            if len(args['smoothing']) > 1:
                first_val = float(args['smoothing'][1])

            second_val = 0.025
            if len(args['smoothing']) > 2:
                second_val = float(args['smoothing'][2])

        elif algorithm == "savgol":
            first_val = 45
            if len(args['smoothing']) > 1:
                first_val = int(args['smoothing'][1])

            second_val = 2
            if len(args['smoothing']) > 2:
                second_val = int(args['smoothing'][2])

    if len(paths) > 1:
        logger.info('Applying pool workers...')
        args = zip(paths, [not args['simple'] for _ in paths], [
            args['prominence_threshold'] for _ in paths
        ], [not args['without_angles'] for _ in paths], [
            args['output'] + '/' + os.path.splitext(os.path.basename(path))[0]
            for path in paths
        ], [algorithm for _ in paths], [first_val for _ in paths],
                   [second_val for _ in paths])
        with multiprocessing.Pool(None) as pool:
            pool.starmap(subprocess, args)
    else:
        tqdm_paths = tqdm.tqdm(paths)
        for path in tqdm_paths:
            filename_without_extension = \
                os.path.splitext(os.path.basename(path))[0]
            output_path_name = f'{args["output"]}/{filename_without_extension}'
            tqdm_paths.set_description(filename_without_extension)
            subprocess(path, not args['simple'], args['prominence_threshold'],
                       not args['without_angles'], output_path_name, algorithm,
                       first_val, second_val)
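
The smoothing option above is a list whose first element selects the algorithm and whose optional second and third elements override the defaults (0.25 / 0.025 for fourier, 45 / 2 for savgol). A small, self-contained sketch of the same parsing logic, using a hypothetical helper name:

def parse_smoothing(smoothing):
    # Mirrors the branch above: per-algorithm defaults, overridden by the
    # optional second and third list entries.
    if not smoothing:
        return "", -1, -1
    defaults = {'fourier': (0.25, 0.025, float), 'savgol': (45, 2, int)}
    algorithm = smoothing[0]
    if algorithm not in defaults:
        return algorithm, -1, -1
    first_val, second_val, cast = defaults[algorithm]
    if len(smoothing) > 1:
        first_val = cast(smoothing[1])
    if len(smoothing) > 2:
        second_val = cast(smoothing[2])
    return algorithm, first_val, second_val

# parse_smoothing(['fourier', '0.1']) -> ('fourier', 0.1, 0.025)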
Code example #3
    def __init__(self, dataset: h5py.Dataset):
        """
        Initialize the AttributeHandler with an already opened HDF5 dataset.
        This dataset will be used for all operations performed by this class.

        Args:
            dataset: h5py dataset
        """
        self.dataset: h5py.Dataset = dataset
        self.attrs: h5py.AttributeManager = dataset.attrs
        self.logger = get_logger(__name__)
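
A minimal, hypothetical usage sketch (the file name and dataset key are placeholders, not taken from the SLIX sources):

import h5py

with h5py.File('measurement.h5', 'r') as file:    # placeholder file name
    handler = AttributeHandler(file['Image'])      # placeholder dataset key
    # handler.attrs now exposes the HDF5 attributes of that dataset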
Code example #4
def write_vector(args, direction_image, output_path_name):
    logger = get_logger(__name__)
    image = SLIX.io.imread(args['slimeasurement'])
    UnitX, UnitY = SLIX.toolbox.unit_vectors(direction_image, use_gpu=False)

    # Try to fix image shape if the two axes are swapped
    if image.shape[:2] != UnitX.shape[:2] and \
            image.shape[:2][::-1] == UnitX.shape[:2]:
        image = image.T
    if image.shape[:2] != UnitX.shape[:2]:
        logger.warning("Direction and SLI measurement are not correctly aligned."
                       " The program will still run but the results might not represent"
                       " the expected result. Please check your input!")
    weight_map = read_weight_map(args['weight_map'])

    thinout = args['thinout']
    scale = args['scale']
    alpha = args['alpha']
    background_threshold = args['threshold']
    vector_width = args['vector_width']
    fig = plt.figure(dpi=args['dpi'])
    ax = fig.add_subplot()
    fig.subplots_adjust(0, 0, 1, 1, 0, 0)
    if vector_width < 0:
        vector_width = numpy.ceil(thinout / 3)
    if len(image.shape) == 2:
        ax.imshow(image, cmap='gray', origin='lower')
    else:
        ax.imshow(numpy.max(image, axis=-1), cmap='gray', origin='lower')
    ax.set_xlim(0, image.shape[1])
    ax.set_ylim(image.shape[0], 0)
    ax.axis('off')
    if args['distribution']:
        write_vector_distribution(UnitX, UnitY, alpha, args, output_path_name, scale, thinout, vector_width, weight_map,
                                  fig, ax)
    else:
        write_vector_field(UnitX, UnitY, alpha, args, background_threshold, output_path_name, scale, thinout,
                           vector_width, weight_map, fig, ax)
    plt.clf()
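
The shape check near the top only transposes the SLI measurement when its first two axes are exactly reversed relative to the unit vector maps. A standalone illustration of that check, with made-up shapes:

import numpy

image = numpy.zeros((200, 100))       # e.g. an averaged SLI image, axes swapped
unit_x = numpy.zeros((100, 200, 3))   # unit vectors derived from the direction map
if image.shape[:2] != unit_x.shape[:2] and \
        image.shape[:2][::-1] == unit_x.shape[:2]:
    image = image.T                   # now (100, 200), aligned with unit_x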
Code example #5
File: Cluster.py (Project: 3d-pli/SLIX)
def main():
    parser = create_argparse()
    arguments = parser.parse_args()
    args = vars(arguments)
    logger = get_logger("SLIXCluster")

    output_data_type = '.' + args['output_type']

    if output_data_type not in ['.nii', '.nii.gz', '.h5', '.tiff', '.tif']:
        logger.error(
            'Output data type is not supported. Please choose a valid '
            'datatype!')
        exit(1)

    if not io.check_output_dir(args['output']):
        exit(1)

    all = False
    inclination = False
    crossing = False
    flat = False

    if args['all']:
        all = True
    if args['inclination']:
        inclination = True
    if args['crossing']:
        crossing = True
    if args['flat']:
        flat = True

    # If no parameter map was requested, print the help text and exit
    if not all and not inclination and not crossing and not flat:
        parser.print_help()
        sys.exit(0)

    # Load all parameter maps from the user given folder
    loaded_parameter_maps, basename = load_parameter_maps(args['input'])

    if flat or inclination or crossing:
        loaded_parameter_maps['flat_mask'] = classification.flat_mask(
            loaded_parameter_maps['high_prominence_peaks'],
            loaded_parameter_maps['low_prominence_peaks'],
            loaded_parameter_maps['peakdistance'])

        if flat:
            flat_name = basename.replace('basename', 'flat_mask')
            io.imwrite(f'{args["output"]}/{flat_name}{output_data_type}',
                       loaded_parameter_maps['flat_mask'])

    if inclination:
        inclination_mask = classification.inclinated_mask(
            loaded_parameter_maps['high_prominence_peaks'],
            loaded_parameter_maps['peakdistance'],
            loaded_parameter_maps['max'], loaded_parameter_maps['flat_mask'])
        inclination_name = basename.replace('basename', 'inclination_mask')
        io.imwrite(f'{args["output"]}/{inclination_name}{output_data_type}',
                   inclination_mask)

    if crossing:
        crossing_mask = classification.crossing_mask(
            loaded_parameter_maps['high_prominence_peaks'],
            loaded_parameter_maps['max'],
        )
        crossing_name = basename.replace('basename', 'crossing_mask')
        io.imwrite(f'{args["output"]}/{crossing_name}{output_data_type}',
                   crossing_mask)

    if all:
        full_mask = classification.full_mask(
            loaded_parameter_maps['high_prominence_peaks'],
            loaded_parameter_maps['low_prominence_peaks'],
            loaded_parameter_maps['peakdistance'],
            loaded_parameter_maps['max'])
        full_name = basename.replace('basename', 'classification_mask')
        io.imwrite(f'{args["output"]}/{full_name}{output_data_type}',
                   full_mask)
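
Each mask's file name is derived by replacing the literal token 'basename' in the string returned by load_parameter_maps. A short illustration with a made-up basename:

basename = 'Section_0001_basename'                      # hypothetical value
flat_name = basename.replace('basename', 'flat_mask')   # 'Section_0001_flat_mask'
# written as e.g. <output>/Section_0001_flat_mask.tiff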
Code example #6
try:
    try:
        import cupy
        from numba import cuda

        cupy.empty(0, dtype=float)
        from SLIX.GPU import toolbox as gpu_toolbox

        gpu_available = True
    except cupy.cuda.runtime.CUDARuntimeError:
        print('[WARNING] CuPy is installed but an error was thrown by the '
              'runtime. SLIX will fall back to the CPU variant.')
        gpu_available = False
    except (cuda.cudadrv.driver.CudaAPIError, cuda.cudadrv.driver.LinkerError):
        get_logger("SLIX").info(
            "Numba CUDA couldn't be initialized. "
            "Please check if there are problems with your CUDA / Numba "
            "version. SLIX will fall back to the CPU variant.")
        gpu_available = False
except (ModuleNotFoundError, NameError):
    gpu_available = False
    get_logger("SLIX").info(
        'CuPy is not installed. The toolbox will use the CPU '
        'variant instead. If you want to use the GPU variant, please run '
        '`pip install cupy`.')

__all__ = [
    'background_mask', 'centroid_correction', 'direction', 'unit_vectors',
    'num_peaks', 'mean_peak_prominence', 'peaks', 'peak_prominence',
    'peak_width', 'mean_peak_distance', 'peak_distance', 'mean_peak_width',
    'significant_peaks'
]
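
Downstream code can branch on toolbox.gpu_available to decide whether computations run on the GPU, as the other examples on this page do. A hedged sketch, reusing the call signature seen in example #7 and a placeholder image stack:

import numpy
from SLIX import toolbox

stack = numpy.random.rand(64, 64, 24)        # placeholder SLI image stack
peaks = toolbox.peaks(stack,
                      use_gpu=toolbox.gpu_available,
                      return_numpy=True)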
Code example #7
def main():
    logger = get_logger("SLIXParameterGenerator")
    parser = create_argument_parser()
    arguments = parser.parse_args()
    args = vars(arguments)

    DIRECTION = True
    PEAKS = True
    PEAKWIDTH = True
    PEAKPROMINENCE = True
    PEAKDISTANCE = True
    INCLINATION_SIGN = True
    UNIT_VECTORS = False
    output_data_type = '.' + args['output_type']

    if output_data_type not in ['.nii', '.nii.gz', '.h5', '.tiff', '.tif']:
        logger.error(
            'Output data type is not supported. Please choose a valid '
            'datatype!')
        exit(1)

    if args['direction'] or args['peaks'] or args['peakprominence'] or \
            args['peakwidth'] or args['peakdistance'] or \
            args['unit_vectors'] or args['inclination_sign']:
        DIRECTION = args['direction']
        PEAKS = args['peaks']
        PEAKPROMINENCE = args['peakprominence']
        PEAKWIDTH = args['peakwidth']
        PEAKDISTANCE = args['peakdistance']
        INCLINATION_SIGN = args['inclination_sign']
        UNIT_VECTORS = args['unit_vectors']
    OPTIONAL = args['optional']
    if toolbox.gpu_available:
        toolbox.gpu_available = args['disable_gpu']

    logger.info(f'\nSLI Feature Generator:\n' + f'Chosen feature maps:\n' +
                f'Direction maps: {DIRECTION} \n' + f'Peak maps: {PEAKS} \n' +
                f'Peak prominence map: {PEAKPROMINENCE} \n' +
                f'Peak width map: {PEAKWIDTH} \n' +
                f'Peak distance map: {PEAKDISTANCE} \n' +
                f'Inclination sign map: {INCLINATION_SIGN} \n' +
                f'Unit vector maps: {UNIT_VECTORS} \n' +
                f'Optional maps: {OPTIONAL} \n')

    paths = args['input']
    if not isinstance(paths, list):
        paths = [paths]

    if not SLIX.io.check_output_dir(args['output']):
        exit(1)

    number_of_param_maps = numpy.count_nonzero([
        DIRECTION, PEAKS, PEAKPROMINENCE, PEAKWIDTH, PEAKDISTANCE,
        INCLINATION_SIGN, OPTIONAL, UNIT_VECTORS, args['smoothing']
        is not None, args['with_mask'], not args['no_centroids']
    ]) + 1
    tqdm_paths = tqdm.tqdm(paths)
    tqdm_step = tqdm.tqdm(total=number_of_param_maps)
    for path in tqdm_paths:
        if os.path.isdir(path):
            filename_without_extension = get_file_pattern(path)
        else:
            filename_without_extension = \
                os.path.splitext(os.path.basename(path))[0]
        output_path_name = f'{args["output"]}/{filename_without_extension}'
        tqdm_paths.set_description(filename_without_extension)

        tqdm_step.set_description('Reading image')
        image = io.imread(path)
        while len(image.shape) < 3:
            image = image[numpy.newaxis, ...]

        if os.path.isdir(path):
            io.imwrite(f'{output_path_name}_Stack' + output_data_type, image)

        if args['thinout'] > 1:
            image = preparation.thin_out(image,
                                         args['thinout'],
                                         strategy='average')
            output_path_name = f'{output_path_name}_thinout_{args["thinout"]}'
            io.imwrite(output_path_name + output_data_type, image)
        tqdm_step.update(1)

        if args['smoothing']:
            tqdm_step.set_description('Applying smoothing')
            result = smooth_image(args, image, output_path_name)
            if result is None:
                logger.error(f"Unknown smoothing option. "
                             f"Please use either 'fourier' or 'savgol'!")
                exit(1)
            else:
                image = result[0]
                output_path_name = result[1]
                del result

            tqdm_step.update(1)
            io.imwrite(output_path_name + output_data_type, image)

        if toolbox.gpu_available:
            image = cupy.array(image)

        if args['with_mask']:
            tqdm_step.set_description('Creating mask')
            mask = toolbox.background_mask(
                image,
                use_gpu=toolbox.gpu_available,
                return_numpy=not toolbox.gpu_available)
            image[mask, :] = 0
            if toolbox.gpu_available:
                mask = mask.get()
            io.imwrite(
                f'{output_path_name}_background_mask'
                f'{output_data_type}', mask)
            tqdm_step.update(1)

        tqdm_step.set_description('Generating peaks')
        significant_peaks = toolbox. \
            significant_peaks(image,
                              low_prominence=args['prominence_threshold'],
                              use_gpu=toolbox.gpu_available,
                              return_numpy=not toolbox.gpu_available)
        if toolbox.gpu_available:
            significant_peaks_cpu = significant_peaks.get()
        else:
            significant_peaks_cpu = significant_peaks

        if PEAKS:
            peaks = toolbox.peaks(image,
                                  use_gpu=toolbox.gpu_available,
                                  return_numpy=True)

            io.imwrite(
                f'{output_path_name}_high_prominence_peaks'
                f'{output_data_type}',
                numpy.sum(significant_peaks_cpu, axis=-1, dtype=numpy.uint16))
            io.imwrite(
                f'{output_path_name}_low_prominence_peaks'
                f'{output_data_type}',
                numpy.sum(peaks, axis=-1, dtype=numpy.uint16) -
                numpy.sum(significant_peaks_cpu, axis=-1, dtype=numpy.uint16))

            if args['detailed']:
                io.imwrite(
                    f'{output_path_name}_all_peaks_detailed'
                    f'{output_data_type}', peaks)
                io.imwrite(
                    f'{output_path_name}_high_prominence_peaks_detailed'
                    f'{output_data_type}', significant_peaks_cpu)

            tqdm_step.update(1)

        if PEAKPROMINENCE:
            tqdm_step.set_description('Generating peak prominence')

            io.imwrite(
                f'{output_path_name}_peakprominence'
                f'{output_data_type}',
                toolbox.mean_peak_prominence(image,
                                             significant_peaks,
                                             use_gpu=toolbox.gpu_available,
                                             return_numpy=True))

            if args['detailed']:
                peak_prominence_full = \
                    toolbox.peak_prominence(image,
                                            peak_image=significant_peaks,
                                            kind_of_normalization=1,
                                            use_gpu=toolbox.gpu_available,
                                            return_numpy=True)
                io.imwrite(
                    f'{output_path_name}_peakprominence_detailed'
                    f'{output_data_type}', peak_prominence_full)
                del peak_prominence_full

            tqdm_step.update(1)

        if PEAKWIDTH:
            tqdm_step.set_description('Generating peak width')

            io.imwrite(
                f'{output_path_name}_peakwidth'
                f'{output_data_type}',
                toolbox.mean_peak_width(image,
                                        significant_peaks,
                                        use_gpu=toolbox.gpu_available,
                                        return_numpy=True))

            if args['detailed']:
                peak_width_full = \
                    toolbox.peak_width(image, significant_peaks,
                                       use_gpu=toolbox.gpu_available,
                                       return_numpy=False)
                io.imwrite(
                    f'{output_path_name}_peakwidth_detailed'
                    f'{output_data_type}', peak_width_full)
                del peak_width_full

            tqdm_step.update(1)

        if args['no_centroids']:
            tqdm_step.set_description('Generating centroids')

            centroids = toolbox. \
                centroid_correction(image, significant_peaks,
                                    use_gpu=toolbox.gpu_available,
                                    return_numpy=not toolbox.gpu_available)

            if args['detailed']:
                if toolbox.gpu_available:
                    centroids_cpu = centroids.get()
                else:
                    centroids_cpu = centroids
                io.imwrite(
                    f'{output_path_name}_centroid_correction'
                    f'{output_data_type}', centroids_cpu)
                tqdm_step.update(1)
        else:
            # If no centroids are used, use zeros for all values instead.
            if toolbox.gpu_available:
                centroids = cupy.zeros(image.shape)
            else:
                centroids = numpy.zeros(image.shape)

        if PEAKDISTANCE:
            tqdm_step.set_description('Generating peak distance')

            io.imwrite(
                f'{output_path_name}_peakdistance'
                f'{output_data_type}',
                toolbox.mean_peak_distance(significant_peaks,
                                           centroids,
                                           use_gpu=toolbox.gpu_available,
                                           return_numpy=True))

            if args['detailed']:
                peak_distance_full = toolbox. \
                    peak_distance(significant_peaks, centroids,
                                  use_gpu=toolbox.gpu_available,
                                  return_numpy=True)
                io.imwrite(
                    f'{output_path_name}_peakdistance_detailed'
                    f'{output_data_type}', peak_distance_full)
                del peak_distance_full

            tqdm_step.update(1)

        if DIRECTION or UNIT_VECTORS:
            tqdm_step.set_description('Generating direction')
            direction = toolbox.direction(significant_peaks,
                                          centroids,
                                          correction_angle=args['correctdir'],
                                          use_gpu=toolbox.gpu_available,
                                          strategy=args['direction_strategy'],
                                          return_numpy=True)
            if DIRECTION:
                for dim in range(direction.shape[-1]):
                    io.imwrite(
                        f'{output_path_name}_dir_{dim + 1}'
                        f'{output_data_type}', direction[:, :, dim])
                tqdm_step.update(1)

            if UNIT_VECTORS:
                tqdm_step.set_description('Generating unit vectors')
                UnitX, UnitY = toolbox.unit_vectors(
                    direction,
                    use_gpu=toolbox.gpu_available,
                    return_numpy=True)
                UnitZ = numpy.zeros(UnitX.shape)
                for dim in range(UnitX.shape[-1]):
                    io.imwrite(
                        f'{output_path_name}'
                        f'_dir_{dim + 1}_UnitX.nii', UnitX[:, :, dim])
                    io.imwrite(
                        f'{output_path_name}'
                        f'_dir_{dim + 1}_UnitY.nii', UnitY[:, :, dim])
                    io.imwrite(
                        f'{output_path_name}'
                        f'_dir_{dim + 1}_UnitZ.nii', UnitZ[:, :, dim])

                tqdm_step.update(1)

        if INCLINATION_SIGN:
            tqdm_step.set_description('Generating inclination sign')
            inclination_sign = toolbox.inclination_sign(
                significant_peaks,
                centroids,
                correction_angle=args['correctdir'],
                use_gpu=toolbox.gpu_available,
                return_numpy=True)
            io.imwrite(
                f'{output_path_name}_inclination_sign{output_data_type}',
                inclination_sign)
            del inclination_sign
            tqdm_step.update(1)

        if OPTIONAL:
            tqdm_step.set_description('Generating optional maps')
            if toolbox.gpu_available:
                image = image.get()
            min_img = numpy.min(image, axis=-1)
            io.imwrite(f'{output_path_name}_min{output_data_type}', min_img)
            del min_img

            max_img = numpy.max(image, axis=-1)
            io.imwrite(f'{output_path_name}_max{output_data_type}', max_img)
            del max_img

            avg_img = numpy.average(image, axis=-1)
            io.imwrite(f'{output_path_name}_avg{output_data_type}', avg_img)
            del avg_img

            non_crossing_direction = toolbox. \
                direction(significant_peaks, centroids,
                          number_of_directions=1,
                          correction_angle=args['correctdir'],
                          use_gpu=toolbox.gpu_available,
                          return_numpy=True)
            io.imwrite(f'{output_path_name}_dir{output_data_type}',
                       non_crossing_direction)
            del non_crossing_direction
            tqdm_step.update(1)

        tqdm_step.reset()
    tqdm_step.close()
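
This main function is presumably installed as a console entry point; run directly, a minimal wrapper would be:

if __name__ == '__main__':
    main()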