Example 1
def measure_max(source,
                points,
                search,
                radii,
                sink=None,
                processes=None,
                verbose=False):
    """Find index in local search indices with a voxel with value smaller than a specified value for a list of points. 
    
  Arguments
  ---------
  source : array
    Data source.
  points : array
    List of linear indices of center points.
  search : array
    List of linear indices to add to the center index defining the local search area.
  radii : array
    The maximal index in the search array to use for each point.
  sink : array or None
    Optional sink for result indices.
  processes : int or None
    Number of processes to use.
  verbose : bool
    If True, print progress info.
  
  Returns
  -------
  sink : array
    Array of the same length as points containing the maximal value found in the local search area around each point.
  """

    processes, timer = ap._initialize_processing(processes=processes,
                                                 verbose=verbose,
                                                 function='measure_max')
    source1d = ap._initialize_source(source, as_1d=True)

    sink = ap._initialize_sink(sink=sink,
                               shape=points.shape,
                               dtype=source.dtype)

    if sink.shape != points.shape:
        raise RuntimeError('Sink has invalid size %r not %r' %
                           (sink.shape, points.shape))

    #print(source1d.dtype, points.dtype, search.dtype, type(value), sink.dtype);
    #print(source1d.shape, points.shape, sink.shape)
    #print(source1d, points, search, radii, sink)
    code.measure_max(source1d, points, search, radii, sink, processes)

    ap._finalize_processing(verbose=verbose,
                            function='measure_max',
                            timer=timer)

    return sink
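
A hedged usage sketch with toy data (the point indices and search offsets below are made up for illustration; measure_max is assumed to be imported from its ClearMap module):

import numpy as np

data = np.random.rand(50, 50, 50)               # toy volume
points = np.array([62525, 63775])               # linear indices of two center points
offsets = np.arange(-5, 6)                      # linear-index offsets defining the search area
radii = np.full(len(points), len(offsets))      # use every offset for both points

maxima = measure_max(data, points, offsets, radii, verbose=True)
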
Example 2
def smooth_by_counting(source, sink = None, low = 5, high = 10, shape = None):
  """Smooth binary image by counting neighbours.
  
  Arguments
  ---------
  source : array
    The binary source to smooth.
  sink : array or None
    The sink to write the smoothed source to.
  low : int
    If a voxel has fewer than this number of 26-neighbours it is set to False.
  high : int
    If a voxel has more than this number of 26-neighbours it is set to True.
  shape : tuple of int or None
    The shape of the square structuring element to consider.
    
  Returns
  -------
  sink : array
    The smoothed binary source.
    
  Note
  ----
  The algorithm uses a sequence of 1d convolutions for speed, allowing only 
  rectangular structuring elements.
  """
  ndim = source.ndim;
  if shape is None:
    shape = (3,) * ndim;
  
  filtered = source;
  for d in range(ndim):
    weights = np.ones(shape[d], dtype = int);
    temp = np.zeros(source.shape, dtype = 'uint8');
    ap.correlate1d(filtered, weights, sink=temp, axis=d, mode='constant', cval=0);
    filtered = temp;
  
  if sink is None:
    sink = np.array(source, dtype = bool);

  sink[filtered >= high] = True;
  sink[filtered < low] = False;
  
  return sink
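
The counting scheme itself can be reproduced with plain scipy, which is useful for checking the logic; this sketch substitutes scipy.ndimage.correlate1d for the parallel ap.correlate1d (the call semantics match here):

import numpy as np
import scipy.ndimage as ndi

binary = np.random.rand(40, 40, 40) > 0.5
counts = binary.astype('uint8')
for d in range(binary.ndim):
    # separable 1d sums; after all axes, counts holds the number of
    # True voxels in the 3x3x3 box around each voxel (including itself)
    counts = ndi.correlate1d(counts, np.ones(3, dtype=int),
                             axis=d, mode='constant', cval=0)

smoothed = binary.copy()
smoothed[counts >= 10] = True   # high
smoothed[counts < 5] = False    # low
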
Example 3
def clip(source, sink = None, clip_min = None, clip_max = None, clip_norm = None, processes = None, verbose = False):
  """Clip and normalize data.

  Arguments
  ---------
  source : array
      Input source.
  sink : array, dtype or None
      Output sink or output data type; if None, a new array is allocated.
  clip_min : number
      Minimal number to clip source data to.
  clip_max : number
      Maximal number to clip source data to.
  clip_norm : number
      Normalization constant.
  processes : int or None
      Number of processes to use.
  verbose : bool
      If True, print progress info.

  Returns
  -------
  sink : array
      Clipped output.
  """
  processes, timer = ap.initialize_processing(verbose=verbose, processes=processes, function='clip');
  
  source, source_buffer = ap.initialize_source(source);

  if source.ndim != 3:
    raise ValueError('Source assumed to be 3d found %dd!' % source.ndim);
  
  if clip_min is None:
    clip_min = ap.io.min_value(source);
  
  if clip_max is None:
    clip_max = ap.io.max_value(source);
  
  if clip_norm is None:
    clip_norm = clip_max - clip_min;

  sink, sink_buffer = ap.initialize_sink(sink = sink, source = source);
                                            
  code.clip(source_buffer, sink_buffer, clip_min, clip_max, clip_norm, processes);
  
  return sink;
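
Since code.clip is a compiled kernel, its exact arithmetic is not visible here; a rough numpy equivalent, assuming it clips to [clip_min, clip_max] and divides the shifted values by clip_norm, would be:

import numpy as np

def clip_numpy(source, clip_min, clip_max, clip_norm):
    # clip, shift to zero, and normalize; a sketch of the assumed behaviour
    clipped = np.clip(source, clip_min, clip_max).astype(float)
    return (clipped - clip_min) / clip_norm
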
Example 4
def rank(source,
         sink=None,
         function=rnk.median,
         resample=None,
         verbose=False,
         out=sys.stdout,
         **kwargs):
    """Rank filter inbetween reshaping."""

    timer = tmr.Timer()

    sink, sink_buffer = ap.initialize_sink(sink=sink, source=source, order='F')

    if resample:
        interpolation = cv2.INTER_NEAREST
        new_shape = np.round(np.array(sink.shape, dtype=float) *
                             resample).astype(int)
        new_shape[2] = sink.shape[2]
        data = np.zeros(tuple(new_shape), order='F', dtype=source.dtype)
        new_shape = tuple(new_shape[1::-1])
        for z in range(source.shape[2]):
            data[:, :, z] = cv2.resize(src=source[:, :, z],
                                       dsize=new_shape,
                                       interpolation=interpolation)
        #print data.shape, data.dtype
        out.write(timer.elapsed_time(head='Rank filter: Resampling') + '\n')
    else:
        data = source

    #keys = inspect.getargspec(function).args;
    #kwargs = { k : v for k,v in kwargs.iteritems() if k in keys};

    data = function(data, **kwargs)

    out.write(
        timer.elapsed_time(head='Rank filter: %s' % function.__name__) + '\n')

    if resample:
        #interpolation = cv2.INTER_LINEAR;
        interpolation = cv2.INTER_AREA
        for z in range(sink.shape[2]):
            sink_buffer[:, :, z] = cv2.resize(src=data[:, :, z],
                                              dsize=sink.shape[1::-1],
                                              interpolation=interpolation)
        out.write(timer.elapsed_time(head='Rank filter: Upsampling') + '\n')
    else:
        sink_buffer[:] = data

    return sink
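
A hedged usage sketch (rnk is assumed to be the module providing the default rnk.median, and the selem keyword is an assumption about that function's signature): the volume is downsampled in x and y by the resample factor, median filtered, and upsampled back onto the original grid.

import numpy as np

volume = (np.random.rand(256, 256, 32) * 255).astype('uint8')
selem = np.ones((3, 3, 3), dtype=bool)   # hypothetical structuring-element argument
filtered = rank(volume, function=rnk.median, resample=0.5, selem=selem)
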
Example 5
def detect_maxima(source, h_max=None, shape=5, threshold=None, verbose=False):
    """Detect (extended) maxima in the source and return their center coordinates."""
    # extended maxima
    maxima = md.find_maxima(source,
                            h_max=h_max,
                            shape=shape,
                            threshold=threshold,
                            verbose=verbose)

    #center of maxima
    if h_max:
        centers = md.find_center_of_maxima(source,
                                           maxima=maxima,
                                           verbose=verbose)
    else:
        centers = ap.where(maxima).array

    return centers
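
A hedged usage sketch with toy data (md and ap are the maxima-detection and array-processing modules the function already references):

import numpy as np

volume = np.random.rand(64, 64, 64)
centers = detect_maxima(volume, h_max=0.1, shape=5, threshold=0.5)
# centers is an n x 3 array of voxel coordinates, one row per detected maximum
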
Example 6
def detect_cells(
        source,
        sink=None,
        cell_detection_parameter=default_cell_detection_parameter,
        processing_parameter=default_cell_detection_processing_parameter):
    """Cell detection pipeline.
  
  Arguments
  ---------
  source : source specification
    The source of the stitched raw data.
  sink : sink specification or None
    The sink to write the result to. If None, an array is returned.
  cell_detection_parameter : dict
    Parameter for the binarization. See below for details.
  processing_parameter : dict
    Parameter for the parallel processing. 
    See :func:`ClearMap.ParallelProcessing.BlockProcessing.process` for 
    a description of all the parameters.
  verbose : bool
    If True, print progress output.
  
  Returns
  -------
  sink : Source
    The result of the cell detection.
  
  Notes
  -----
  Effectively this function performs the following steps:
    * illumination correction via :func:`~ClearMap.ImageProcessing.IlluminationCorrection.correct_illumination`
    * background removal
    * difference of Gaussians (DoG) filter
    * maxima detection via :func:`~ClearMap.Analysis.Measurements.MaximaDetection.find_extended_maxima`
    * cell shape detection via :func:`~ClearMap.Analysis.Measurements.ShapeDetection.detect_shape`
    * cell intensity and size measurements via: :func:`~ClearMap.ImageProcessing.Measurements.ShapeDetection.find_intensity`,
      :func:`~ClearMap.ImageProcessing.Measurements.ShapeDetection.find_size`. 

  
  The parameters for each step are passed as sub-dictionaries to the 
    cell_detection_parameter dictionary.
  
  * If None is passed for one of the steps this step is skipped.
  
  * Each step also has an additional parameter 'save' that enables saving of 
    the result of that step to a file to inspect the pipeline.
  
  
  Illumination correction
  -----------------------
  illumination_correction : dict or None
    Illumination correction step parameter.

    flatfield : array or str 
      The flat field estimate for the image planes.
    
    background : array or None
      A background level to assume for the flatfield correction.
    
    scaling : float, 'max', 'mean' or None
      Optional scaling after the flat field correction.
    
    save : str or None
      Save the result of this step to the specified file if not None.
          
  See also :func:`ClearMap.ImageProcessing.IlluminationCorrection.correct_illumination`
  
  
  Background removal
  ------------------
  background_correction : dict or None
    Background removal step parameter.

    shape : tuple
      The shape of the structuring element to estimate the background.
      This should be larger than the typical cell size.
    
    form : str
      The form of the structuring element (e.g. 'Disk')
        
    save : str or None
      Save the result of this step to the specified file if not None.
  
  Equalization
  ------------
  equalization : dict or None
    Equalization step parameter.
    See also :func:`ClearMap.ImageProcessing.LocalStatistics.local_percentile`
    
    percentile : tuple
      The lower and upper percentiles used to estimate the equalization.
      The lower percentile is used for normalization, the upper to limit the
      maximal boost to a maximal intensity above this percentile.
    
    max_value : float
      The maximal intensity value in the equalized image.
    
    selem : tuple
      The structural element size to estimate the percentiles. 
      Should be larger than the largest vessels.
    
    spacing : tuple
      The spacing used to move the structural elements.
      Larger spacings speed up processing but become locally less precise.
        
    interpolate : int
      The order of the interpolation used in constructing the full 
      background estimate in case a non-trivial spacing is used.
      
    save : str or None
      Save the result of this step to the specified file if not None.
  
  
  DoG Filter
  ----------
  dog_filter : dict or None
    Difference of Gaussian filter step parameter.

    shape : tuple
      The shape of the filter.
      This should be near the typical cell size.
      
    sigma : tuple or None
       The std of the inner Gaussian.
       If None, determined automatically from shape.
    
    sigma2 : tuple or None
       The std of the outer Gaussian.
       If None, determined automatically from shape.
    
    save : str or None
      Save the result of this step to the specified file if not None.
  
  
  Maxima detection
  ----------------
  maxima_detection : dict or None
    Extended maxima detection step parameter.

    h_max : float or None
      The 'height' for the extended maxima.
      If None, simple local maxima detection is used.

    shape : tuple
      The shape of the structural element for extended maxima detection.
      This should be near the typical cell size.
    
    threshold : float or None
      Only maxima above this threshold are detected. If None, all maxima
      are detected.
      
    valid : bool
      If True, only detect cell centers in the valid range of the blocks with
      overlap.
    
    save : str or None
      Save the result of this step to the specified file if not None.
  
  
  Shape detection
  ---------------
  shape_detection : dict or None
    Shape detection step parameter.

    threshold : float
      Cell shape is expanded from maxima if pixels are above this threshold
      and not closer to another maximum.
    
    save : str or None
      Save the result of this step to the specified file if not None.
  
  
  Intensity detection
  -------------------
  intensity_detection : dict or None
    Intensity detection step parameter.

    method : {'max', 'min', 'mean', 'sum'}
      The method to use to measure the intensity of a cell.
      
    shape : tuple or None
      If no cell shapes are detected a disk of this shape is used to measure
      the cell intensity.
    
    save : str or None
      Save the result of this step to the specified file if not None.
  
  References
  ----------
  [1] Renier, Adams, Kirst, Wu et al., "Mapping of Brain Activity by Automated Volume Analysis of Immediate Early Genes.", Cell 165, 1789 (2016)
  [2] Kirst et al., "Mapping the Fine-Scale Organization and Plasticity of the Brain Vasculature", Cell 180, 780 (2020)
  """

    #initialize sink
    shape = io.shape(source)
    order = io.order(source)

    for key in cell_detection_parameter.keys():
        par = cell_detection_parameter[key]
        if isinstance(par, dict):
            filename = par.get('save', None)
            if filename:
                ap.initialize_sink(filename,
                                   shape=shape,
                                   order=order,
                                   dtype='float')

    cell_detection_parameter.update(
        verbose=processing_parameter.get('verbose', False))

    results, blocks = bp.process(detect_cells_block,
                                 source,
                                 sink=None,
                                 function_type='block',
                                 return_result=True,
                                 return_blocks=True,
                                 parameter=cell_detection_parameter,
                                 **processing_parameter)

    #merge results
    results = np.vstack([np.hstack(r) for r in results])

    #create column headers
    header = ['x', 'y', 'z']
    dtypes = [int, int, int]
    if cell_detection_parameter['shape_detection'] is not None:
        header += ['size']
        dtypes += [int]
    measures = cell_detection_parameter['intensity_detection']['measure']
    header += measures
    dtypes += [float] * len(measures)

    dt = {
        'names': header,
        'formats': dtypes
    }
    cells = np.zeros(len(results), dtype=dt)
    for i, h in enumerate(header):
        cells[h] = results[:, i]

    #save results
    return io.write(sink, cells)
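
A hedged usage sketch: each sub-dictionary configures one step, None skips a step, and a 'save' entry writes that step's intermediate result to a file. The file names, parameter values, and processing_parameter keys below are illustrative, not prescribed defaults:

cell_detection_parameter = {
    'illumination_correction': None,                     # skip this step
    'background_correction': {'shape': (10, 10), 'form': 'Disk', 'save': None},
    'equalization': None,
    'dog_filter': {'shape': (6, 6, 6), 'sigma': None, 'sigma2': None},
    'maxima_detection': {'h_max': None, 'shape': 5, 'threshold': 10, 'valid': True},
    'shape_detection': {'threshold': 700, 'save': None},
    'intensity_detection': {'method': 'max', 'shape': (3, 3, 3),
                            'measure': ['source', 'background']},
}
processing_parameter = {'processes': None, 'verbose': True}

cells = detect_cells('raw.npy', sink='cells.npy',
                     cell_detection_parameter=cell_detection_parameter,
                     processing_parameter=processing_parameter)
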
Example 7
def connectPoint(data,
                 mask,
                 endpoints,
                 start_index,
                 radius,
                 tubeness=None,
                 min_quality=None,
                 remove_local_mask=True,
                 skeleton=None,
                 verbose=False,
                 **trace_parameter):
    """Tries to connect an end point"""

    # outline:
    # find neighbour end points and try to connect to nearest one
    # if path score good enough add path and remove two endpoints
    # else try to connect to binarized image
    # if path score good enough connect to closest skeleton point
    # else not connectable

    # assumes everything is in Fortran order
    strides = np.array(data.strides) // data.itemsize
    shape = data.shape
    #print strides, shape

    center_flat = endpoints[start_index]
    center_xyz = np.array(np.unravel_index(center_flat, data.shape, order='F'))

    mask_nbh = extractNeighbourhood(mask, center_xyz, radius)
    data_nbh = np.asarray(extractNeighbourhood(data, center_xyz, radius),
                          dtype=float,
                          order='F')
    shape_nbh = mask_nbh.shape

    center_nbh_xyz = np.zeros(3, dtype=int) + radius
    #center_nbh_flat = np.ravel_multi_index(center_nbh_xyz, shape_nbh, order = 'F');

    if tubeness is None:
        tubeness_nbh = cur.tubeness(
            ndi.gaussian_filter(np.asarray(data_nbh, dtype=float), sigma=1.0))
        tubeness_nbh = np.asarray(tubeness_nbh, order='F')
    else:
        tubeness_nbh = extractNeighbourhood(tubeness, center_xyz, radius)

    mask_nbh_label = np.empty(shape_nbh, dtype='int32', order='F')
    _ = ndi.label(mask_nbh,
                  structure=np.ones((3, 3, 3), dtype=bool),
                  output=mask_nbh_label)
    local_nbh = mask_nbh_label[tuple(center_nbh_xyz)] == mask_nbh_label

    # end point neighbours
    nbs_flat = ap.findNeighbours(endpoints, start_index, shape, strides,
                                 radius)

    if len(nbs_flat) > 0:
        nbs_nbh_xyz = np.vstack(np.unravel_index(
            nbs_flat, shape, order='F')).T - center_xyz + center_nbh_xyz
        nbs_nbh_flat = np.ravel_multi_index(nbs_nbh_xyz.T,
                                            shape_nbh,
                                            order='F')

        # remove connected neighbours
        non_local_nbh_flat = np.reshape(np.logical_not(local_nbh),
                                        -1,
                                        order='F')
        nbs_nbh_non_local_flat = nbs_nbh_flat[non_local_nbh_flat[nbs_nbh_flat]]

        if len(nbs_nbh_non_local_flat) > 0:
            #find nearest neighbour
            nbs_nbh_non_local_xyz = np.vstack(
                np.unravel_index(nbs_nbh_non_local_flat, shape, order='F')).T

            nbs_nbh_non_local_dist = nbs_nbh_non_local_xyz - center_nbh_xyz
            nbs_nbh_non_local_dist = np.sum(nbs_nbh_non_local_dist *
                                            nbs_nbh_non_local_dist,
                                            axis=1)

            neighbor_nbh_xyz = nbs_nbh_non_local_xyz[np.argmin(
                nbs_nbh_non_local_dist)]

            path, quality = trc.trace(data_nbh,
                                      tubeness_nbh,
                                      center_nbh_xyz,
                                      neighbor_nbh_xyz,
                                      verbose=False,
                                      returnQuality=True,
                                      **trace_parameter)

            if len(path) > 0:
                if quality / len(path) < min_quality:
                    if verbose:
                        print(
                            'Found good path to neighbour of length = %d with quality = %f (per length = %f) [%d / %d nonlocal neighbours]'
                            % (len(path), quality, quality / len(path),
                               len(nbs_nbh_non_local_flat), len(nbs_flat)))
                        #print path
                    return path + center_xyz - center_nbh_xyz, quality
                else:
                    if verbose:
                        print(
                            'Found bad  path to neighbour of length = %d with quality = %f (per length = %f) [%d / %d nonlocal neighbours]'
                            % (len(path), quality, quality / len(path),
                               len(nbs_nbh_non_local_flat), len(nbs_flat)))
                        #print path
            else:
                if verbose:
                    print(
                        'Found no path to neighbour [%d / %d nonlocal neighbours]'
                        % (len(nbs_nbh_non_local_flat), len(nbs_flat)))
                    #print path

    # tracing to nearest neighbour failed
    if verbose:
        print('Found no valid path to neighbour, now tracing to binary!')
        #print path

    # Tracing to next binary
    if remove_local_mask:
        mask_nbh[local_nbh] = False

    distance_nbh = ndi.distance_transform_edt(np.logical_not(mask_nbh))
    distance_nbh = np.asarray(distance_nbh, order='F')

    path, quality = trc.traceToMask(data_nbh,
                                    tubeness_nbh,
                                    center_nbh_xyz,
                                    distance_nbh,
                                    verbose=False,
                                    returnQuality=True,
                                    **trace_parameter)

    if len(path) > 0:
        if quality / len(path) < min_quality:
            if verbose:
                print(
                    'Found good path to binary of length = %d with quality = %f (per length = %f)'
                    % (len(path), quality, quality / len(path)))
                #print path

            # trace to skeleton
            if skeleton is not None:
                #find closest point on skeleton
                final_xyz = path[0]
                skeleton_nbh = extractNeighbourhood(skeleton, center_xyz,
                                                    radius)
                local_end_path_nbh = mask_nbh_label[tuple(
                    final_xyz)] == mask_nbh_label
                skeleton_nbh_dxyz = np.vstack(
                    np.where(np.logical_and(skeleton_nbh,
                                            local_end_path_nbh))).T - final_xyz
                if len(
                        skeleton_nbh_dxyz
                ) == 0:  # could not find skeleton nearby -> give up for now
                    return path + center_xyz - center_nbh_xyz, quality

                skeleton_nbh_dist = np.sum(skeleton_nbh_dxyz *
                                           skeleton_nbh_dxyz,
                                           axis=1)
                closest_dxyz = skeleton_nbh_dxyz[np.argmin(skeleton_nbh_dist)]
                closest_xyz = closest_dxyz + final_xyz
                #print path[0], path[-1]
                #print center_nbh_xyz, closest_dxyz

                #generate pixel path
                max_l = np.max(np.abs(closest_dxyz)) + 1
                path_add_xyz = np.vstack([
                    np.asarray(np.linspace(f, c, max_l), dtype=int)
                    for f, c in zip(final_xyz, closest_xyz)
                ]).T
                path_add_flat = np.ravel_multi_index(path_add_xyz.T, shape_nbh)
                _, ids = np.unique(path_add_flat, return_index=True)
                path_add_xyz = path_add_xyz[ids]
                #print path_add_xyz;
                path = np.vstack([path, path_add_xyz])
                # note: this is not an ordered path anymore!

            return path + center_xyz - center_nbh_xyz, quality
        else:
            if verbose:
                print(
                    'Found bad  path to binary of length = %d with quality = %f (per length = %f)'
                    % (len(path), quality, quality / len(path)))
                #print path

    if verbose:
        print('Found no valid path to binary!')

    return np.zeros((0, 3)), 0
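
connectPoint leans on an extractNeighbourhood helper that is not shown in this listing; a minimal sketch of such a cube extraction (ignoring the boundary clipping a real implementation needs) could look like this:

import numpy as np

def extract_neighbourhood_sketch(array, center_xyz, radius):
    # cut a (2*radius+1)**3 cube around center_xyz; assumes the cube
    # lies fully inside the array bounds
    slices = tuple(slice(c - radius, c + radius + 1) for c in center_xyz)
    return array[slices]
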
Example 8
def average(source,
            sink=None,
            shape=None,
            dtype=None,
            weights=None,
            indices=None,
            kernel=None,
            return_counts=False,
            processes=None,
            verbose=False):
    """Averages a list of points into an volumetric image array.
  
  Arguments
  ---------
  source : str, array or Source
    Source of points as an nxd array of coordinates.
  sink : str, array or None
    The sink for the devolved image; if None, return an array.
  shape : tuple, str or None
    Shape of the final devolved data. If None, determine from points.
    If str, determine shape from the source at the specified location.
  dtype : dtype or None
    Optional data type of the sink.
  weights : array or None
    Weight array of length n for each point. If None, use uniform weights.  
  indices : array
    The relative indices to the center to devolve over as nxd array.
  kernel : array
    Optional kernel weights for each index in indices.
  return_counts : bool
    If True, also return the per-voxel counts.
  processes : int or None
    Number of processes to use.
  verbose : bool
    If True, print progress info.                        
 
  Returns
  -------
  sink : str, array
    Volumetric data of devolved point data.
  """
    processes, timer = ap.initialize_processing(processes=processes,
                                                verbose=verbose,
                                                function='devolve')

    #points, points_buffer = ap.initialize_source(points);
    points_buffer = io.read(source)
    if points_buffer.ndim == 1:
        points_buffer = points_buffer[:, None]

    if sink is None and shape is None:
        if points_buffer.ndim > 1:
            shape = tuple(
                int(math.ceil(points_buffer[:, d].max()))
                for d in range(points_buffer.shape[1]))
        else:
            shape = (int(math.ceil(points_buffer[:].max())), )
    elif isinstance(shape, str):
        shape = io.shape(shape)

    if sink is None and dtype is None:
        if weights is not None:
            dtype = io.dtype(weights)
        elif kernel is not None:
            kernel = np.asarray(kernel)
            dtype = kernel.dtype
        else:
            dtype = int

    sink, sink_buffer, sink_shape, sink_strides = ap.initialize_sink(
        sink=sink,
        shape=shape,
        dtype=dtype,
        return_shape=True,
        return_strides=True,
        as_1d=True)

    #TODO: initialize properly
    counts = np.zeros(sink_shape, dtype=int, order=sink.order)
    counts_buffer = counts.reshape(-1, order='A')
    #print(counts.shape, counts_buffer.shape)

    if indices is None:
        return sink
    indices = np.asarray(indices, dtype=int)
    if indices.ndim == 1:
        indices = indices[:, None]

    if kernel is not None:
        kernel = np.asarray(kernel, dtype=float)

    #print(kernel);
    #print(weights)
    #return;

    code.average(points_buffer, weights, indices, sink_buffer, sink_shape,
                 sink_strides, counts_buffer, processes)
    #  if weights is None:
    #    if kernel is None:
    #      code.devolve_uniform(points_buffer, indices, sink_buffer, sink_shape, sink_strides, processes);
    #    else:
    #      code.devolve_uniform_kernel(points_buffer, indices, kernel, sink_buffer, sink_shape, sink_strides, processes);
    #  else:
    #    if kernel is None:
    #      code.devolve_weights(points_buffer, weights, indices, sink_buffer, sink_shape, sink_strides, processes);
    #    else:
    #      code.devolve_weights_kernel(points_buffer, weights, indices, kernel, sink_buffer, sink_shape, sink_strides, processes);
    #TODO: move to code
    good = counts_buffer > 0
    sink_buffer[good] /= counts_buffer[good]

    ap.finalize_processing(verbose=verbose, function='devolve', timer=timer)

    if return_counts:
        return sink, counts
    else:
        return sink
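
The compiled code.average call is opaque here; for the simplest case (no index offsets beyond the point itself, no kernel) its effect can be sketched in plain numpy as a scatter-add of weights and counts followed by the same normalization the function applies to sink_buffer above:

import numpy as np

def average_numpy(points, weights, shape):
    # points: n x d integer coordinates; weights: length-n values
    sink = np.zeros(shape, dtype=float).reshape(-1)
    counts = np.zeros(shape, dtype=int).reshape(-1)
    flat = np.ravel_multi_index(points.T, shape)
    np.add.at(sink, flat, weights)   # accumulate weights per voxel
    np.add.at(counts, flat, 1)       # accumulate hit counts per voxel
    good = counts > 0
    sink[good] /= counts[good]       # average where at least one point landed
    return sink.reshape(shape), counts.reshape(shape)
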
Example 9
def binarize(source, sink = None, binarization_parameter = default_binarization_parameter, processing_parameter = default_binarization_processing_parameter):
  """Multi-path binarization of iDISCO+ cleared vasculature data.
  
  Arguments
  ---------
  source : source specification
    The source of the stitched raw data.
  sink : sink specification or None
    The sink to write the result to. If None, an array is returned.
  binarization_parameter : dict
    Parameter for the binarization. See below for details.
  processing_parameter : dict
    Parameter for the parallel processing. 
    See :func:`ClearMap.ParallelProcessing.BlockProcessing.process` for 
    a description of all the parameters.
  verbose : bool
    If True, print progress output.
  
  Returns
  -------
  sink : Source
    The result of the binarization.
  
  Notes
  -----
  * The binarization pipeline is composed of several steps. The parameters for
    each step are passed as sub-dictionaries to the binarization_parameter 
    dictionary.
  
  * If None is passed for one of the steps this step is skipped.
  
  * Each step also has an additional parameter 'save' that enables saving of 
    the result of that step to a file to inspect the pipeline.
    
  General parameter
  -----------------
  binary_status : str or None
    File name to save the information about which part of the multi-path 
    binarization contributed to the final result.
    
  max_bin : int
    Number of intensity levels to use for the data after preprocessing.
    Higher values will increase the intensity resolution but slow down
    processing. 
    
    For the vasculature a typical value is 2**12.

  Clipping
  --------
  clip : dict or None
    Clipping and mask generation step parameter.

    clip_range : tuple 
      The range to clip the raw data as (lowest, highest).
      Voxels above lowest define the foreground mask used 
      in the following steps.
      
      For the vasculature a typical value is (400,60000). 
      
    save : str or None
      Save the result of this step to the specified file if not None.
          
  See also :mod:`ClearMap.ImageProcessing.Clipping.Clipping`
      
  Lightsheet correction
  ---------------------  
  lightsheet : dict or None
    Lightsheet correction step parameter.
   
    percentile : float
      Percentile in [0,1] used to estimate the lightsheet artifact.
      
      For the vasculature a typical value is 0.25.
      
    lightsheet : dict
      Parameter for the lightsheet artifact percentile estimation. 
      See :func:`ClearMap.ImageProcessing.LightsheetCorrection.correct_lightsheet`
      for list of all parameters. The crucial parameter is
      
      selem : tuple
        The structural element shape used to estimate the stripe artifact.
        It should match the typical length, width, and depth of the artifact 
        in the data.
        
        For the vasculature a typical value is (150,1,1).
    
    background : dict
      Parameter for the background estimation in the light sheet correction. 
      See :func:`ClearMap.ImageProcessing.LightsheetCorrection.correct_lightsheet`
      for list of all parameters. The crucial parameters are
      
      selem : tuple
        The structural element shape used to estimate the background.
        It should be bigger than the largest vessels.
        
        For the vasculature a typical value is (200,200,1).

      spacing : tuple
        The spacing to use to estimate the background. Larger spacings speed up
        processing but become less local estimates.
        
        For the vasculature a typical value is (25,25,1)
        
      step : tuple
        This parameter enables subsampling from the entire array defined by
        the structural element using larger than single voxel steps.
        
        For the vasculature a typical value is (2,2,1).
        
      interpolate : int
        The order of the interpolation used in constructing the full 
        background estimate in case a non-trivial spacing is used.
        
        For the vasculature a typical value is 1.
        
    lightsheet_vs_background : float
      The background is multiplied by this weight before comparing to the
      lightsheet artifact estimate. 
      
      For the vasculature a typical value is 2.
    
    save : str or None
      Save the result of this step to the specified file if not None.

  Median filter
  -------------
  median : dict or None
    Median correction step parameter.
    See :func:`ClearMap.ImageProcessing.Filter.Rank.median` for all parameter.
    The important parameters are

    selem : tuple
      The structural element size for the median filter.
      
      For the vasculature a typical value is (3,3,3).
    
    save : str or None
      Save the result of this step to the specified file if not None.  
  
  Pseudo Deconvolution
  --------------------
  deconvolve : dict
    The deconvolution step parameter.
    
    sigma : float
      The std of a Gaussian filter applied to the high intensity pixel image.
      The number should reflect the scale of the halo effect seen around high
      intensity structures.
      
      For the vasculature a typical value is 10.
    
    save : str or None
      Save the result of this step to the specified file if not None.   
      
    threshold : float 
      Voxels above this threshold will be added to the binarization result
      in the multi-path binarization.
      
      For the vasculature a typical value is 750.
  
  Adaptive Thresholding
  ---------------------
  adaptive : dict or None
    Adaptive thresholding step parameter.
    A local ISODATA threshold is estimated.
    See also :mod:`ClearMap.ImageProcessing.LocalStatistics`.
    
    selem : tuple
      The structural element size to estimate the percentiles. 
      Should be larger than the largest vessels.
      
      For the vasculature a typical value is (200,200,5).
    
    spacing : tuple
      The spacing used to move the structural elements.
      Larger spacings speed up processing but become locally less precise.
               
      For the vasculature a typical value is (50,50,5)
        
    interpolate : int
      The order of the interpolation used in constructing the full 
      background estimate in case a non-trivial spacing is used.
      
      For the vasculature a typical value is 1.
      
    save : str or None
      Save the result of this step to the specified file if not None.   


  Equalization
  ------------
  equalize : dict or None
    Equalization step parameter.
    See also :func:`ClearMap.ImageProcessing.LocalStatistics.local_percentile`
    
    percentile : tuple
      The lower and upper percentiles used to estimate the equalization.
      The lower percentile is used for normalization, the upper to limit the
      maximal boost to a maximal intensity above this percentile.
    
      For the vasculature a typical value is (0.4, 0.975).
    
    max_value : float
      The maximal intensity value in the equalized image.
               
      For the vasculature a typical value is 1.5. 
    
    selem : tuple
      The structural element size to estimate the percentiles. 
      Should be larger than the largest vessels.
      
      For the vasculature a typical value is (200,200,5).
    
    spacing : tuple
      The spacing used to move the structural elements.
      Larger spacings speed up processing but become locally less precise.
               
      For the vasculature a typical value is (50,50,5)
        
    interpolate : int
      The order of the interpolation used in constructing the full 
      background estimate in case a non-trivial spacing is used.
      
      For the vasculature a typical value is 1.
      
    save : str or None
      Save the result of this step to the specified file if not None.   
      
    threshold : float 
      Voxels above this threshold will be added to the binarization result
      in the multi-path binarization.
      
      For the vasculature a typical value is 1.1.

  Tube filter
  -----------
  vesselize : dict
    The tube filter step parameter.
    
    background : dict or None
      Parameters to correct for local background. See 
      :func:`ClearMap.ImageProcessing.Filter.Rank.percentile`.
      If None, no background correction is done before the tube filter.
      
      selem : tuple
        The structural element specification to estimate the percentiles. 
        Should be larger than the largest vessels intended to be 
        boosted by the tube filter.
        
        For the vasculature a typical value is ('disk', (30,30,1)).

      percentile : float
        Percentile in [0,1] used to estimate the background.

        For the vasculature a typical value is 0.5.
        
    tubeness : dict
      Parameters used for the tube filter. See 
      :func:`ClearMap.ImageProcessing.Differentiation.Hessian.lambda123`.
      
      sigma : float
        The scale of the vessels to boost in the filter.
        
        For the vasculature a typical value is 1.0.
  
    save : str or None
      Save the result of this step to the specified file if not None.   
      
    threshold : float 
      Voxels above this threshold will be added to the binarization result
      in the multi-path binarization.
      
      For the vasculature a typical value is 120.
  
  Binary filling
  --------------
  fill : dict or None
    If not None, apply binary filling to the binarized result.

  For the vasculature this step is set to None and done globally 
  in the postprocessing step.
  
  Binary smoothing
  ----------------
  smooth : dict or None
    The smoothing parameter passed to 
    :func:`ClearMap.ImageProcessing.Binary.Smoothing.smooth_by_configuration`.
  
  For the vasculature this step is set to None and done globally 
  in the postprocessing step.
  
  References
  ----------
  [1] C. Kirst et al., "Mapping the Fine-Scale Organization and Plasticity of the Brain Vasculature", Cell 180, 780 (2020)
  """
    
  #initialize sink
  shape = io.shape(source);
  order = io.order(source);
  sink, sink_buffer = ap.initialize_sink(sink=sink, shape=shape, order=order, dtype=bool); #, memory='shared');
  
  #initialize addition output sinks  
  binary_status = binarization_parameter.get('binary_status', None);
  if binary_status:
    ap.initialize_sink(binary_status, source=sink, shape=shape, order=order, dtype='uint16');

  for key in binarization_parameter.keys():
    par = binarization_parameter[key];
    if isinstance(par, dict):
      filename = par.get('save', None);
      if filename:
        ap.initialize_sink(filename, shape=shape, order=order, dtype='float');
        
  binarization_parameter.update(verbose=processing_parameter.get('verbose', False));
  
  bp.process(binarize_block, source, sink, function_type='block', parameter=binarization_parameter, **processing_parameter)                   
  
  return sink;                
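
A hedged usage sketch: start from the module defaults and override individual steps. The file names are illustrative; the clip_range and max_bin values are the typical vasculature settings quoted in the docstring:

binarization_parameter = dict(default_binarization_parameter)
binarization_parameter['max_bin'] = 2**12
binarization_parameter['clip'] = {'clip_range': (400, 60000), 'save': 'clipped.npy'}

binary = binarize('stitched.npy', sink='binary.npy',
                  binarization_parameter=binarization_parameter)
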
Example 10
def postprocess(source, sink = None, postprocessing_parameter = default_postprocessing_parameter, processing_parameter = default_postprocessing_processing_parameter, processes = None, verbose = True):
  """Postprocess a binarized image.
  
  Arguments
  ---------
  source : source specification
    The binary source.
  sink : sink specification or None
    The sink to write the postprocessed result to. 
    If None, an array is returned.
  postprocessing_parameter : dict
    Parameter for the postprocessing.
  processing_parameter : dict
    Parameter for the parallel processing.
  verbose : bool
    If True, print progress output.
  
  Returns
  -------
  sink : Source
    The result of the postprocessing.
    
  Notes
  -----
  * The postprocessing pipeline is composed of several steps. The parameters
    for each step are passed as sub-dictionaries to the 
    postprocessing_parameter dictionary.
  
  * If None is passed for one of the steps the step is skipped.
    
  Smoothing
  ---------
  smooth : dict or None
    Smoothing step parameter. See
    :func:`ClearMap.ImageProcessing.Binary.Smoothing.smooth_by_configuration`

    iterations : int
      Number of smoothing iterations.
      
      For the vasculature a typical value is 6.
  
  Filling
  -------
  fill : bool or None
    If True, fill holes in the binary data.
  """
  
  source = io.as_source(source);  
  sink   = ap.initialize_sink(sink, shape=source.shape, dtype=source.dtype, order=source.order, return_buffer=False);
  
  if verbose:
    timer = tmr.Timer();
    print('Binary post processing: initialized.');
  
  postprocessing_parameter = postprocessing_parameter.copy();
  parameter_smooth = postprocessing_parameter.pop('smooth', None);
  parameter_fill   = postprocessing_parameter.pop('fill', None);
  #print(parameter_smooth, parameter_fill)
  
  #smoothing
  save = None;
  if parameter_smooth:
    #initialize temporary files if needed
    if parameter_fill:
      save = parameter_smooth.pop('save', None);
      temporary_filename = save; 
      if temporary_filename is None:
        temporary_filename = postprocessing_parameter['temporary_filename'];
      if temporary_filename is None:
        temporary_filename = tmpf.mktemp(prefix='TubeMap_Vasculature_postprocessing', suffix='.npy');
      sink_smooth   = ap.initialize_sink(temporary_filename, shape=source.shape, dtype=source.dtype, order=source.order, return_buffer=False);
    else:
      sink_smooth = sink;
    
    #run smoothing
    source_fill = bs.smooth_by_configuration(source, sink=sink_smooth, processing_parameter=processing_parameter, processes=processes, verbose=verbose, **parameter_smooth);
  
  else:
    source_fill = source;
  
  if parameter_fill:
    sink = bf.fill(source_fill, sink=sink, processes=processes, verbose=verbose);
    
    if parameter_smooth and save is None:
      io.delete_file(temporary_filename);
  else:
    sink = source_fill;
  
  if verbose:
    timer.print_elapsed_time('Binary post processing');
  
  gc.collect()
  return sink;
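
A hedged usage sketch with the typical vasculature settings from the docstring (file names are illustrative; 'temporary_filename' is only consulted when both smoothing and filling run without a 'save' entry):

postprocessing_parameter = {'smooth': {'iterations': 6},
                            'fill': True,
                            'temporary_filename': None}

postprocess('binary.npy', sink='binary_postprocessed.npy',
            postprocessing_parameter=postprocessing_parameter,
            processes=None, verbose=True)
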
Example 11
def _test():
  import numpy as np  
  import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap 
  
  from importlib import reload
  reload(ap)
  
  
  ## Lookup table processing
  
  #apply_lut  
  x = np.random.randint(0, 100, size=(20,30));
  lut = np.arange(100) + 1;
  y = ap.apply_lut(x, lut)
  assert np.all(y == x+1)

  #apply_lut_to_index
  import ClearMap.ImageProcessing.Topology.Topology3d as t3d
  kernel = t3d.index_kernel(dtype=int);
  
  import ClearMap.ImageProcessing.Binary.Smoothing as sm
  lut = sm.initialize_lookup_table()
    
  data = np.random.randint(0, 2, (1500,300,400), dtype = bool)
  
  #reload(ap)
  result = ap.apply_lut_to_index(data, kernel, lut, sink=None, verbose=True)

  import ClearMap.Visualization.Plot3d as p3d
  p3d.plot([data, result])    
  
  
  ## Correlation 
  
  #correlate1d
  #reload(ap)
  axis = 1;
  kernel = np.array(range(11), dtype='uint32');  
  data = np.random.randint(0, 2**27, (1000, 1500,100), dtype='uint32');

  corr = ap.correlate1d(data, kernel, axis=axis, verbose=True, processes=10);
  
  import scipy.ndimage as ndi
  import ClearMap.Utils.Timer as tmr
  timer = tmr.Timer();
  corr_ndi = ndi.correlate1d(data, kernel, axis=axis, mode='constant',cval=0);
  timer.print_elapsed_time('ndi')  
  
  assert np.allclose(corr, corr_ndi)
  
  

#
#
#
#
#
#default_blocks_per_process = 10;
#"""Default number of blocks per process to split the data.
#
#Note
#----
#10 blocks per process is a good choice.
#"""
#
#default_cutoff = 20000000;
#"""Default size of array below which ordinary numpy is used.
#
#Note
#----
#Ideally test this on your machine for different array sizes.
#"""
#
#
#
#def blockRanges(data, blocks = None,  processes = defaultProcesses):
#  """Ranges of evenly spaced blocks in array
#  
#  Arguments:
#    data : array
#      array to divide in blocks
#    blocks : int or None
#      number of blocks to split array into
#    processes : None or int
#      number of processes, if None use number of cpus
#    
#  Returns:
#    array
#      list of the range boundaries
#  """
#  if processes is None:
#    processes = defaultProcesses;
#  if blocks is None:
#    blocks = processes * defaultBlocksPerProcess;
#   
#  d = data.reshape(-1, order = 'A'); 
#  blocks = min(blocks, d.shape[0]);
#  return np.array(np.linspace(0,  d.shape[0], blocks + 1), dtype = int);
#
#
#def blockSums(data, blocks = None, processes = defaultProcesses):
#  """Sums of evenly spaced blocks in array
#  
#  Arguments:
#    data : array
#      array to perform the block sums on
#    blocks : int or None
#      number of blocks to split array into
#    processes : None or int
#      number of processes, if None use number of cpus
#    
#  Returns:
#    array
#      sums of the values in the different blocks
#  """
#  if processes is None:
#    processes = defaultProcesses;
#  if blocks is None:
#    blocks = processes * defaultBlocksPerProcess;
#  
#  d = data.reshape(-1, order = 'A');
#  if data.dtype == bool:
#    d = d.view('uint8')
#  
#  return code.blockSums1d(d, blocks = blocks, processes = processes);
#  
#
#def where(data, out = None, blocks = None, cutoff = defaultCutoff, processes = defaultProcesses):
#  """Returns the indices of the non-zero entries of the array
#  
#  Arguments:
#    data : array
#      array to search for nonzero indices
#    out : array or None
#      if not None results is written into this array
#    blocks : int or None
#      number of blocks to split array into for parallel processing
#    cutoff : int
#      number of elements below which to switch to numpy.where
#    processes : None or int
#      number of processes, if None use number of cpus
#    
#  Returns:
#    array
#      positions of the nonzero entries of the input array
#  
#  Note:
#    Uses numpy.where if there is no match of dimension implemented!
#  """ 
#  if data.ndim != 1 and data.ndim != 3:
#    raise Warning('Using numpy where for dimension %d and type %s!' % (data.ndim, data.dtype))
#    return np.vstack(np.where(data)).T;
#
#  if cutoff is None:
#    cutoff = 1;
#  cutoff = min(1, cutoff);
#  if data.size <= cutoff:
#    return np.vstack(np.where(data)).T;
#
#  if processes is None:
#    processes = defaultProcesses;
#  if blocks is None:
#    blocks = processes * defaultBlocksPerProcess;
#  
#  if data.dtype == bool:
#    d = data.view('uint8')
#  else:
#    d = data;
#  
#  if out is None:
#    if d.ndim == 1:
#      sums = code.blockSums1d(d, blocks = blocks, processes = processes);
#    else:
#      sums = code.blockSums3d(d, blocks = blocks, processes = processes);
#    out = np.squeeze(np.zeros((np.sum(sums), data.ndim), dtype = np.int));
#  else:
#    sums = None;
#  
#  if d.ndim == 1:
#    code.where1d(d, out = out, sums = sums, blocks = blocks, processes = processes);
#  else: # d.ndim == 3:
#    code.where3d(d, out = out, sums = sums, blocks = blocks, processes = processes);
#    
#  return out;
#
#
#
#
#def setValue(data, indices, value, cutoff = defaultCutoff, processes = defaultProcesses):
#  """Set value at specified indices of an array
#  
#  Arguments:
#    data : array
#      array to search for nonzero indices
#    indices : array or None
#      list of indices to set
#    value : numeric or bool
#      value to set elements in data to
#    processes : None or int
#      number of processes, if None use number of cpus
#    
#  Returns:
#    array
#      array with specified entries set to new value
#  
#  Note:
#    Uses numpy if there is no match of dimension implemented!
#  """
#  if data.ndim != 1:
#    raise Warning('Using numpy where for dimension %d and type %s!' % (data.ndim, data.dtype))
#    data[indices] = value;
#    return data;
#    
#  if cutoff is None:
#    cutoff = 1;
#  cutoff = min(1, cutoff);
#  if data.size <= cutoff:
#    data[indices] = value;
#    return data;
#  
#  if processes is None:
#    processes = defaultProcesses;
#  
#  if data.dtype == bool:
#    d = data.view('uint8')
#  else:
#    d = data;
#  
#  code.set1d(d, indices, value, processes = processes);
#  
#  return data;
#
#
#def setArray(data, indices, values, cutoff = defaultCutoff, processes = defaultProcesses):
#  """Set value at specified indices of an array
#  
#  Arguments:
#    data : array
#      array to search for nonzero indices
#    indices : array or None
#      list of indices to set
#    values : array
#      values to set elements in data to
#    processes : None or int
#      number of processes, if None use number of cpus
#    
#  Returns:
#    array
#      array with specified entries set to new value
#  
#  Note:
#    Uses numpy if there is no match of dimension implemented!
#  """
#  if data.ndim != 1:
#    raise Warning('Using numpy where for dimension %d and type %s!' % (data.ndim, data.dtype))
#    data[indices] = values;
#    return data;
#    
#  if cutoff is None:
#    cutoff = 1;
#  cutoff = min(1, cutoff);
#  if data.size <= cutoff:
#    data[indices] = values;
#    return data;
#  
#  if processes is None:
#    processes = defaultProcesses;
#  
#  if data.dtype == bool:
#    d = data.view('uint8')
#  else:
#    d = data;
#  
#  code.set1darray(d, indices, values, processes = processes);
#  
#  return data;
#
#
#
#def take(data, indices, out = None, cutoff = defaultCutoff, processes = defaultProcesses):
#  """Extracts the values at specified indices
#  
#  Arguments:
#    data : array
#      array to search for nonzero indices
#    out : array or None
#      if not None results is written into this array
#    cutoff : int
#      number of elements below which to switch to numpy.where
#    processes : None or int
#      number of processes, if None use number of cpus
#    
#  Returns:
#    array
#      positions of the nonzero entries of the input array
#  
#  Note:
#    Uses numpy data[indices] if there is no match of dimension implemented!
#  """ 
#  if data.ndim != 1:
#    raise Warning('Using numpy where for dimension %d and type %s!' % (data.ndim, data.dtype))
#    return data[indices];
#
#  if cutoff is None:
#    cutoff = 1;
#  cutoff = min(1, cutoff);
#  if data.size < cutoff:
#    return data[indices];
#
#  if processes is None:
#    processes = defaultProcesses;
#  
#  if data.dtype == bool:
#    d = data.view('uint8')
#  else:
#    d = data;
#
#  if out is None:
#    out = np.empty(len(indices), dtype = data.dtype);
#  if out.dtype == bool:
#    o = out.view('uint8');
#  else:
#    o = out;
#  
#  code.take1d(d, indices, o, processes = processes);
#  
#  return out;
#
#
#def match(match, indices, out = None):
#  """Matches a sorted list of 1d indices to another larger one 
#  
#  Arguments:
#    match : array
#      array of indices to match to indices
#    indices : array or None
#      array of indices
#  
#  Returns:
#    array
#      array with specified entries set to new value
#  
#  Note:
#    Uses numpy if there is no match of dimension implemented!
#  """
#  if match.ndim != 1:
#    raise ValueError('Match array dimension required to be 1d, found %d!' % (match.ndim))
#  if indices.ndim != 1:
#    raise ValueError('Indices array dimension required to be 1d, found %d!' % (indices.ndim))  
#  
#  if out is None:
#    out = np.empty(len(match), dtype = match.dtype);
#  
#  code.match1d(match, indices, out);
#  
#  return out;
#
#
# Find neighbours in an index list
#
#
#def neighbours(indices, offset, processes = defaultProcesses):
#  """Returns all pairs of indices that are apart a specified offset"""
#  return code.neighbours(indices, offset = offset,  processes = processes);
#
#
#def findNeighbours(indices, center, shape, strides, mask):
#  """Finds all indices within a specified kernel region centered at a point"""
#  
#  if len(strides) != 3 or len(shape) != 3 or (strides[0] != 1 and strides[2] != 1):
#    raise RuntimeError('only 3d C or F contiguous arrays supported');
#
#  if isinstance(mask, int):
#    mask = (mask,);
#  if isinstance(mask, tuple):
#    mask = mask * 3;
#    return code.neighbourlistRadius(indices, center, shape[0], shape[1], shape[2], 
#                                                     strides[0], strides[1], strides[2], 
#                                                     mask[0], mask[1], mask[2]);
#  else:
#    if mask.dtype == bool:
#      mask = mask.view(dtype = 'uint8');
#                                                
#    return code.neighbourlistMask(indices, center, shape[0], shape[1], shape[2], strides[0], strides[1], strides[2], mask);
# 
# Loading and saving
#
#def readNumpyHeader(filename):
#  """Read numpy array information including offset to data
#  
#  Arguments:
#    filename : str
#      file name of the numpy file
#      
#  Returns:
#    shape : tuple
#      shape of the array
#    dtype : dtype
#      data type of array 
#    order : str
#      'C' for c and 'F' for fortran order
#    offset : int
#      offset in bytes to data buffer in file
#  """
#  with open(filename, 'rb') as fhandle:
#    major, minor = np.lib.format.read_magic(fhandle);
#    shape, fortran, dtype = np.lib.format.read_array_header_1_0(fhandle);
#    offset = fhandle.tell()
#  
#  order = 'C';
#  if fortran:
#    order = 'F';
#    
#  return (shape, dtype, order, offset)
# 
# 
#def _offsetFromSlice(sourceSlice, order = 'F'):
#  """Checks if slice is compatible with the large data loader and returns z coordiante"""
#   
#  if order == 'C':
#    os = 1; oe = 3; oi = 0;
#  else:
#    os = 0; oe = 2; oi = 2;
#  
#  for s in sourceSlice[os:oe]:
#    if s.start is not None or s.stop is not None or s.step is not None:
#        raise RuntimeError('sub-regions other than in slowest dimension %d not supported!  slice = %r' % (oi, sourceSlice))
#  
#  s = sourceSlice[oi];
#  if s.step is not None:
#      raise RuntimeError('sub-regions with non unity steps not supported')
#  
#  if s.start is None:
#    s = 0;
#  else:
#    s = s.start;
#    
#  return s;
#
#
#def load(filename, region = None, shared = False, blocks = None, processes = cpu_count(), verbose = False):
#  """Load a large npy array into memory in parallel
#  
#  Arguments:
#    filename : str
#      filename of array to load
#    region : Region or None
#      if not None this specifies the sub-region to read
#    shared : bool
#      if True read into shared memory
#    blocks : int or None
#      number of blocks to split array into for parallel processing
#    processes : None or int
#      number of processes, if None use number of cpus
#    verbose : bool
#      print info about the file to be loaded
#    
#  Returns:
#    array 
#      the data as numpy array
#  """
#  if processes is None:
#    processes = cpu_count();
#  if blocks is None:
#    blocks = processes * defaultBlocksPerProcess;
#  
#  #get specs from header specs
#  shape, dtype, order, offset = readNumpyHeader(filename);
#  if verbose:
#    timer = tmr.Timer();
#    print('Loading array of shape = %r, dtype = %r, order = %r, offset = %r' %(shape, dtype, order, offset)); 
#  
#  if region is not None:
#    shape = region.shape();  
#    sourceSlice = region.sourceSlice();
#    off = _offsetFromSlice(sourceSlice, order = order);
#  
#  if shared:
#    data = shm.create(shape, dtype = dtype, order = order);
#  else:
#    data = np.empty(shape, dtype = dtype, order = order);
#  
#  d = data.reshape(-1, order = 'A');
#  if dtype == bool:
#    d = d.view('uint8');  
#  
#  if region is not None:
#    if order == 'F':
#      offset += data.strides[-1] * off;  
#    else:
#      offset += data.strides[1] * off;  
#  
#  code.load(data = d, filename = filename, offset = offset, blocks = blocks, processes = processes);
#  
#  if verbose:
#    timer.printElapsedTime(head = 'Loading array from %s' % filename);
#           
#  return data;
#
#
#
#
#def save(filename, data, region = None, blocks = None, processes = cpu_count(), verbose = False):
#  """Save a large npy array to disk in parallel
#  
#  Arguments:
#    filename : str
#      filename of array to load
#    data : array
#      array to save to disk
#    blocks : int or None
#      number of blocks to split array into for parallel processing
#    processes : None or int
#      number of processes, if None use number of cpus
#    verbose : bool
#      print info about the file to be loaded
#    
#  Returns:
#    str 
#      the filename of the numpy array on disk
#  """
#  if processes is None:
#    processes = cpu_count();
#  if blocks is None:
#    blocks = processes * defaultBlocksPerProcess;
#  
#  if region is None:
#    #create file on disk via memmap
#    memmap = np.lib.format.open_memmap(filename, mode = 'w+', shape = data.shape, dtype = data.dtype, fortran_order = np.isfortran(data));
#    memmap.flush();
#    del(memmap);
#  
#  #get specs from header specs
#  shape, dtype, order, offset = readNumpyHeader(filename);
#  if verbose:
#    timer = tmr.Timer();
#    print('Saving array of shape = %r, dtype = %r, order = %r, offset = %r' %(shape, dtype, order, offset)); 
#  
#  if (np.isfortran(data) and order != 'F') or (not np.isfortran(data) and order != 'C'):
#    raise RuntimeError('Order of arrays do not match isfortran=%r and order=%s' % (np.isfortran(data), order));
#  
#  d = data.reshape(-1, order = 'A');
#  if dtype == bool:
#    d = d.view('uint8');
#    
#  if region is not None:
#    sourceSlice = region.sourceSlice();
#    off = _offsetFromSlice(sourceSlice, order = order);
#    if order == 'F':
#      offset += data.strides[-1] * off;
#    else:
#      offset += data.strides[1] * off;
#  
#  #print d.dtype, filename, offset, blocks, processes
#  
#  code.save(data = d, filename = filename, offset = offset, blocks = blocks, processes = processes);
#  
#  if verbose:
#    timer.printElapsedTime(head = 'Saving array to %s' % filename);
#           
#  return filename;
#
#
#
#
#
#
#if __name__ == "__main__":
#  
#  import numpy as np
#  from ClearMap.Utils.Timer import Timer;
#  import ClearMap.DataProcessing.LargeData as ld
#  reload(ld)
#  
#  
#  #dat = np.random.rand(2000,2000,1000) > 0.5;
#  #dat = np.random.rand(1000,1000,500) > 0.5;
#  dat = np.random.rand(200,300,400) > 0.5;  
#  #datan = io.MMP.writeData('test.npy', dat);
#  
#  dat = np.load('data.npy')
#  xyz1 = np.load('points.npy')
#  
#  s = ld.sum(dat)
#  print(s == np.sum(dat))
#
#
#  timer = Timer();
#  xyz = ld.where(dat)
#  timer.printElapsedTime('parallel')
#  #parallel: elapsed time: 0:00:25.807
#  
#  timer = Timer();
#  xyz1 = np.vstack(np.where(dat)).T
#  timer.printElapsedTime('numpy')
#  #numpy: elapsed time: 0:05:45.590
#  
#  
#  d0 = np.zeros(dat.shape, dtype = bool);
#  d1 = np.zeros(dat.shape, dtype = bool);
#  
#  d0[xyz[:,0], xyz[:,1], xyz[:,2]] = True;
#  d1[xyz1[:,0], xyz1[:,1], xyz1[:,2]] = True;
#  np.all(d0 == d1)
#  
#  dat2 = np.array(np.random.rand(1000, 1000, 1000) > 0, dtype = 'bool');
#  filename = 'test.npy';
#  np.save(filename, dat2)
#  
#  filename = '/disque/raid/vasculature/4X-test2/170824_IgG_2/170824_IgG_16-23-46/rank_threshold.npy'
#  
#  timer = Timer();
#  ldat = ld.load(filename, verbose = True);
#  timer.printElapsedTime('load')
#  #load: elapsed time: 0:00:04.867
#  
#  timer = Timer(); 
#  ldat2 = np.load(filename);  
#  timer.printElapsedTime('numpy')
#  #numpy: elapsed time: 0:00:27.982
#  
#  np.all(ldat == ldat2)
#  
#  timer = Timer();
#  xyz = ld.where(ldat)
#  timer.printElapsedTime('parallel')
#  #parallel: elapsed time: 0:07:25.698
#  
#  lldat = ldat.reshape(-1, order = 'A')
#  timer = Timer();
#  xyz = ld.where(lldat)
#  timer.printElapsedTime('parallel 1d')
#  #parallel 1d: elapsed time: 0:00:49.034
#  
#  timer = Timer();
#  xyz = np.where(ldat)
#  timer.printElapsedTime('numpy')
#  
#  
#  import os
#  #os.remove(filename)
#  
#  filename = './ClearMap/Test/Skeletonization/test_bin.npy';
#  timer = Timer();
#  ldat = ld.load(filename, shared = True, verbose = True);
#  timer.printElapsedTime('load')
#  
#  ld.shm.isShared(ldat);
#  
#  
#  
#  import numpy as np
#  from ClearMap.Utils.Timer import Timer;
#  import ClearMap.DataProcessing.LargeData as ld
#  reload(ld)
#  
#  filename = 'test_save.npy';
#  
#  dat = np.random.rand(100,200,100);
#  
#  ld.save(filename, dat)
#  
#  
#  dat2 = ld.load(filename)
#  
#  np.all(dat == dat2)
#  
#  os.remove(filename)
#  
#  
#    
#  import numpy as np
#  from ClearMap.Utils.Timer import Timer;
#  import ClearMap.DataProcessing.LargeData as ld
#  reload(ld)
#  
#  dat = np.zeros(100, dtype = bool);
#  dat2 = dat.copy();
#  
#  indices = np.array([5,6,7,8,13,42])  
#  
#  ld.setValue(dat, indices, True, cutoff = 0);
#  
#  dat2[indices] = True;
#  np.all(dat2 == dat)
#  
#  d = ld.take(dat, indices, cutoff = 0)
#  np.all(d)
#  
#  
#  import numpy as np
#  from ClearMap.Utils.Timer import Timer;
#  import ClearMap.DataProcessing.LargeData as ld
#  reload(ld)
#  
#  
#  pts = np.array([0,1,5,6,10,11], dtype = int);
#  
#  ld.neighbours(pts, -10)
#  
#  
#  import numpy as np
#  from ClearMap.Utils.Timer import Timer;
#  import ClearMap.DataProcessing.LargeData as ld
#  import ClearMap.ImageProcessing.Filter.StructureElement as sel;
#  reload(ld)
#  
#  dat = np.random.rand(30,40,50) > 0.5;
#  mask = sel.structureElement('Disk', (5,5,5));
#  indices = np.where(dat.reshape(-1))[0];
#  c_id = len(indices)//2;
#  c = indices[c_id];
#  xyz = np.unravel_index(c, dat.shape)
#  l = np.array(mask.shape)//2
#  r = np.array(mask.shape) - l;
#  dlo = [max(0,xx-ll) for xx,ll in zip(xyz,l)];
#  dhi = [min(xx+rr,ss) for xx,rr,ss in zip(xyz,r, dat.shape)]
#  mlo = [-min(0,xx-ll) for xx,ll in zip(xyz,l)];
#  mhi = [mm + min(0, ss-xx-rr) for xx,rr,ss,mm in zip(xyz,r, dat.shape, mask.shape)]
#  
#  nbh = dat[dlo[0]:dhi[0], dlo[1]:dhi[1], dlo[2]:dhi[2]];
#  nbhm = np.logical_and(nbh, mask[mlo[0]:mhi[0], mlo[1]:mhi[1], mlo[2]:mhi[2]] > 0);
#  nxyz = np.where(nbhm);
#  nxyz = [nn + dl for nn,dl in zip(nxyz, dlo)];
#  nbi = np.ravel_multi_index(nxyz, dat.shape);
#  
#  nbs = ld.findNeighbours(indices, c_id , dat.shape, dat.strides, mask)
#  
#  nbs.sort();
#  print(np.all(nbs == nbi))
#  
#  
#  dat = np.random.rand(30,40,50) > 0.5;
#  indices = np.where(dat.reshape(-1))[0];
#  c_id = len(indices)//2;
#  c = indices[c_id];
#  xyz = np.unravel_index(c, dat.shape)
#  l = np.array([2,2,2]);
#  r = l + 1;
#  dlo = [max(0,xx-ll) for xx,ll in zip(xyz,l)];
#  dhi = [min(xx+rr,ss) for xx,rr,ss in zip(xyz, r, dat.shape)]  
#  nbh = dat[dlo[0]:dhi[0], dlo[1]:dhi[1], dlo[2]:dhi[2]];
#  nxyz = np.where(nbh);
#  nxyz = [nn + dl for nn,dl in zip(nxyz, dlo)];
#  nbi = np.ravel_multi_index(nxyz, dat.shape);
#  
#  nbs = ld.findNeighbours(indices, c_id , dat.shape, dat.strides, tuple(l))
#  
#  nbs.sort();
#  print(np.all(nbs == nbi))
#  
#  print(nbs)
#  print(nbi)
#  
#  
#  import numpy as np
#  from ClearMap.Utils.Timer import Timer;
#  import ClearMap.DataProcessing.LargeData as ld
#  reload(ld)
#  
#  data = np.random.rand(100);
#  values = np.random.rand(50);
#  indices = np.arange(50);
#  ld.setArray(data, indices, values, cutoff = 1)
#  print(np.all(data[:50] == values))
#  
#  import numpy as np
#  from ClearMap.Utils.Timer import Timer;
#  import ClearMap.DataProcessing.LargeData as ld
#  reload(ld)
#  
#  m = np.array([1,3,6,7,10]);
#  i = np.array([1,2,3,4,6,7,8,9]);
#  
#  o = ld.match(m,i)
#  
#  o2 = [np.where(i==l)[0][0] for l in m]
#
#
#  
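#  The commented-out load/save helpers above locate the byte offset at which
#  the raw array data starts inside a .npy file via readNumpyHeader. A hedged
#  sketch of that header parsing (readNumpyHeader itself is not shown here),
#  using only numpy's public np.lib.format API:
#
#  import numpy as np
#  from numpy.lib import format as npy_format
#
#  def read_npy_header(filename):
#      """Sketch: return (shape, dtype, order, offset) of a .npy file."""
#      with open(filename, 'rb') as f:
#          major, minor = npy_format.read_magic(f)
#          if (major, minor) == (1, 0):
#              shape, fortran_order, dtype = npy_format.read_array_header_1_0(f)
#          else:
#              shape, fortran_order, dtype = npy_format.read_array_header_2_0(f)
#          offset = f.tell()  # raw data begins right after the header
#      return shape, dtype, 'F' if fortran_order else 'C', offset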
Example n. 12
0
def _test():
    #%%
    import numpy as np
    import scipy.ndimage as ndi
    import ClearMap.DataProcessing.LargeData as ld
    import ClearMap.Visualization.Plot3d as p3d
    import ClearMap.DataProcessing.ConvolvePointList as cpl
    import ClearMap.ImageProcessing.Skeletonization.Topology3d as t3d
    import ClearMap.ImageProcessing.Skeletonization.SkeletonCleanUp as scu

    import ClearMap.ImageProcessing.Tracing.Connect as con
    from importlib import reload  # reload is not a builtin on Python 3
    reload(con)

    data = np.load('/home/ckirst/Desktop/data.npy')
    binary = np.load('/home/ckirst/Desktop/binarized.npy')
    skel = np.load('/home/ckirst/Desktop/skel.npy')
    #points = np.load('/home/ckirst/Desktop/pts.npy');

    data = np.copy(data, order='F')
    binary = np.copy(binary, order='F')
    skel = np.copy(skel, order='F')
    skel_copy = np.copy(skel, order='F')
    points = np.ravel_multi_index(np.where(skel), skel.shape, order='F')

    skel, points = scu.cleanOpenBranches(skel,
                                         skel_copy,
                                         points,
                                         length=3,
                                         clean=True)
    deg = cpl.convolve3DIndex(skel, t3d.n26, points)

    ends, isolated = con.findEndpoints(skel, points, border=25)
    special = np.sort(np.hstack([ends, isolated]))

    ends_xyz = np.array(np.unravel_index(ends, data.shape, order='F')).T
    isolated_xyz = np.array(np.unravel_index(isolated, data.shape,
                                             order='F')).T
    special_xyz = np.vstack([ends_xyz, isolated_xyz])

    #%%
    import ClearMap.ParallelProcessing.SharedMemoryManager as smm
    data_s = smm.asShared(data, order='F')
    binary_s = smm.asShared(binary.view('uint8'), order='F')
    skel_s = smm.asShared(skel.view('uint8'), order='F')

    smm.clean()
    res = con.addConnections(data_s,
                             binary_s,
                             skel_s,
                             points,
                             radius=20,
                             start_points=None,
                             add_to_skeleton=True,
                             add_to_mask=True,
                             verbose=True,
                             processes=4,
                             debug=False,
                             block_size=10)

    skel_s = skel_s.view(bool)
    binary_s = binary_s.view(bool)

    #%%
    mask_img = np.asarray(binary, dtype=int, order='A')
    mask_img[:] = mask_img + binary_s
    mask_img[:] = mask_img + skel

    data_img = np.copy(data, order='A')
    data_img[skel] = 120

    mask_img_f = np.reshape(mask_img, -1, order='A')
    data_img_f = np.reshape(data_img, -1, order='A')

    mask_img_f[res] = 7
    data_img_f[res] = 512

    mask_img_f[special] = 8
    data_img_f[special] = 150

    for d in [3, 4, 5]:
        mask_img_f[points[deg == d]] = d + 1

    try:
        con.viewer[0].setSource(mask_img)
        con.viewer[1].setSource(data_img)
    except:
        con.viewer = p3d.plot([mask_img, data_img])

    con.viewer[0].setMinMax([0, 8])
    con.viewer[1].setMinMax([24, 160])

    #%%
    mask = binary
    data_new = np.copy(data, order='A')
    data_new[skel] = 120

    skel_new = np.asarray(skel, dtype=int, order='A')
    skel_new[:] = skel_new + binary

    binary_new = np.copy(binary, order='A')
    qs = []
    for i, e in enumerate(special):
        print('------')
        print('%d / %d' % (i, len(special)))
        path, quality = con.connectPoint(data,
                                         mask,
                                         special,
                                         i,
                                         radius=25,
                                         skeleton=skel,
                                         tubeness=None,
                                         remove_local_mask=True,
                                         min_quality=15.0,
                                         verbose=True,
                                         maxSteps=15000,
                                         costPerDistance=1.0)

        #print path, quality
        if len(path) > 0:
            qs.append(quality * 1.0 / len(path))

            q = con.addPathToMask(skel_new, path, value=7)
            q = con.addPathToMask(data_new, path, value=512)
            binary_new = con.addDilatedPathToMask(binary_new,
                                                  path,
                                                  iterations=1)

    skel_new[:] = skel_new + binary_new
    q = con.addPathToMask(skel_new, special_xyz, value=6)
    for d in [3, 4, 5]:
        xyz = np.array(
            np.unravel_index(points[deg == d], data.shape, order='F')).T
        q = con.addPathToMask(skel_new, xyz, value=d)
    q = con.addPathToMask(data_new, special_xyz, value=150)

    try:
        con.viewer[0].setSource(skel_new)
        con.viewer[1].setSource(data_new)
    except:
        con.viewer = p3d.plot([skel_new, data_new])

    con.viewer[0].setMinMax([0, 8])
    con.viewer[1].setMinMax([24, 160])

    #%%
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.clf()
    #plt.plot(qs);
    plt.hist(qs)

    #%%
    i = 20
    i = 21
    i = 30
    i = 40
    r = 25
    center = np.unravel_index(ends[i], data.shape)
    print(center, data.shape)
    mask = binary
    path = con.tracePointToMask(data,
                                mask,
                                center,
                                radius=r,
                                points=special_xyz,
                                plot=True,
                                skel=skel,
                                binary=binary,
                                tubeness=None,
                                removeLocalMask=True,
                                maxSteps=None,
                                verbose=False,
                                costPerDistance=0.0)

    #%%

    nbs = ap.findNeighbours(ends, i, skel.shape, skel.strides, r)
    center = np.unravel_index(ends[i], skel.shape)

    nbs_xyz = np.array(np.unravel_index(nbs, skel.shape)).T
    dists = nbs_xyz - center
    dists = np.sum(dists * dists, axis=1)

    nb = np.argmin(dists)

    center = np.unravel_index(ends[i], data.shape)
    print(center, data.shape)
    mask = binary
    path = con.tracePointToNeighbor(data,
                                    mask,
                                    center,
                                    nbs_xyz[nb],
                                    radius=r,
                                    points=special_xyz,
                                    plot=True,
                                    skel=skel,
                                    binary=binary,
                                    tubeness=None,
                                    removeLocalMask=True,
                                    maxSteps=None,
                                    verbose=False,
                                    costPerDistance=0.0)

    #%%

    import ClearMap.ImageProcessing.Filter.FilterKernel as fkr
    dog = fkr.filterKernel('DoG', size=(13, 13, 13))
    dv.plot(dog)

    data_filter = ndi.correlate(np.asarray(data, dtype=float), dog)
    data_filter -= data_filter.min()
    data_filter = data_filter / 3.0
    #dv.dualPlot(data, data_filter);

    #%%add all paths
    reload(con)

    r = 25
    mask = binary
    data_new = data.copy()
    data_new[skel] = 120

    skel_new = np.asarray(skel, dtype=int)
    skel_new = skel_new + binary

    binary_new = binary.copy()

    for i, e in enumerate(special):
        center = np.unravel_index(e, data.shape)

        print(i, e, center)
        path = con.tracePointToMask(data,
                                    mask,
                                    center,
                                    radius=r,
                                    points=special_xyz,
                                    plot=False,
                                    skel=skel,
                                    binary=binary,
                                    tubeness=None,
                                    removeLocalMask=True,
                                    maxSteps=15000,
                                    costPerDistance=1.0)

        q = con.addPathToMask(skel_new, path, value=7)
        q = con.addPathToMask(data_new, path, value=512)
        binary_new = con.addDilatedPathToMask(binary_new, path, iterations=1)

    q = con.addPathToMask(skel_new, special_xyz, value=6)
    for d in [3, 4, 5]:
        xyz = np.array(np.unravel_index(points[deg == d], data.shape)).T
        q = con.addPathToMask(skel_new, xyz, value=d)
    q = con.addPathToMask(data_new, special_xyz, value=150)

    skel_new = skel_new + binary_new
    try:
        con.viewer[0].setSource(skel_new)
        con.viewer[1].setSource(data_new)
    except:
        con.viewer = dv.dualPlot(skel_new, data_new)

    con.viewer[0].setMinMax([0, 8])
    con.viewer[1].setMinMax([24, 160])

    #%%

    import ClearMap.ImageProcessing.Skeletonization.Skeletonize as skl

    skel_2 = skl.skeletonize3D(binary_new.copy())

    #%%

    np.save('/home/ckirst/Desktop/binarized_con.npy', binary_new)
    #%%

    # write image

    import ClearMap.IO.IO as io

    #r = np.asarray(128 * binary_new, dtype = 'uint8');
    #g = r.copy(); b = r.copy();
    #r[:] = r + 127 * skel_2[0];
    #g[:] = g - 128 * skel_2[0];
    #b[:] = b - 128 * skel_2[0];
    #img = np.stack((r,g,b), axis = 3)

    img = np.asarray(128 * binary_new, dtype='uint8')
    img[:] = img + 127 * skel_2[0]

    io.writeData('/home/ckirst/Desktop/3d.tif', img)
Example n. 13
0
def graph_from_skeleton(skeleton, points = None, radii = None, vertex_coordinates = True, 
                        check_border = True, delete_border = False, verbose = False):
  """Converts a binary skeleton image to a graph-tool graph.
  
  Arguments
  ---------
  skeleton : array
    Source with 2d/3d binary skeleton.
  points : array
    List of skeleton points as 1d indices of flat skeleton array (optional to save processing time).
  radii  : array
    List of radii associated with each vertex.
  vertex_coordinates : bool
    If True, store coordinates of the vertices / edges.
  check_border : bool
    If True, check that the border is empty. The algorithm requires this.
  delete_border : bool
    If True, delete the border.
  verbose : bool
    If True, print progress information.
    
  Returns
  -------
  graph : Graph class
    The graph corresponding to the skeleton. 
    
  Note
  ----
  Edges are detected between neighbouring foreground pixels using 26-connectivity
  (see the offset sketch after this function).
  """
  skeleton = io.as_source(skeleton);
  
  if delete_border:
    skeleton = t3d.delete_border(skeleton);
    check_border = False;
  
  if check_border:
    if not t3d.check_border(skeleton):
      raise ValueError('The skeleton array needs to have no points on the border!');  

  
  if verbose:
    timer = tmr.Timer();
    timer_all = tmr.Timer();
    print('Graph from skeleton calculation initialized!');
  
  if points is None:
    points = ap.where(skeleton.reshape(-1, order = 'A')).array;
    
    if verbose:
      timer.print_elapsed_time('Point list generation');
      timer.reset();
  
  #create graph
  n_vertices = points.shape[0];
  g = ggt.Graph(n_vertices=n_vertices, directed=False);
  g.shape = skeleton.shape;
    
  if verbose:
    timer.print_elapsed_time('Graph initialized with %d vertices' % n_vertices);
    timer.reset();
  
  #detect edges
  edges_all = np.zeros((0,2), dtype = int);
  for i,o in enumerate(t3d.orientations()):
    # calculate offset
    offset = np.sum((np.hstack(np.where(o))-[1,1,1]) * skeleton.strides) 
    edges = ap.neighbours(points, offset);
    if len(edges) > 0:
      edges_all = np.vstack([edges_all, edges]);
      
    if verbose:
      timer.print_elapsed_time('%d edges with orientation %d/13 found' % (edges.shape[0], i+1));
      timer.reset();
  
  if edges_all.shape[0] > 0:
    g.add_edge(edges_all);
    
  if verbose:
    timer.print_elapsed_time('Added %d edges to graph' % (edges_all.shape[0]));
    timer.reset();

  if vertex_coordinates:
    vertex_coordinates = np.array(np.unravel_index(points, skeleton.shape, order=skeleton.order)).T;
    g.set_vertex_coordinates(vertex_coordinates);
  
  if radii is not None:
    g.set_vertex_radius(radii);  
  
  if verbose:
    timer_all.print_elapsed_time('Skeleton to Graph');

  return g;
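
# A minimal sketch of the edge detection used in graph_from_skeleton: for a
# boolean array (itemsize 1) the flat-index offset of a neighbour displaced
# by (dx, dy, dz) is the dot product of the displacement with the array
# strides, so an edge exists when point + offset is again a skeleton point.
import numpy as np

skeleton = np.zeros((5, 6, 7), dtype=bool, order='F')
skeleton[2, 3, 4] = skeleton[3, 3, 4] = True  # two 6-connected voxels

points = np.ravel_multi_index(np.where(skeleton), skeleton.shape, order='F')
offset = np.sum(np.array([1, 0, 0]) * np.array(skeleton.strides))  # (+1,0,0)
assert points[0] + offset == points[1]  # neighbour offset hits the 2nd point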
Example n. 14
0
def temp():
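    # interactive scratch code: data, kernel, axis and the parallel result
    # 'corr' are assumed to be defined in the session before the first assert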
    import scipy.ndimage as ndi
    corr_ndi = ndi.correlate1d(data,
                               kernel,
                               axis=axis,
                               mode='constant',
                               cval=0)

    assert np.allclose(corr.array, corr_ndi)

    c = corr.array
    c[0, :, 0]
    corr_ndi[0, :, 0]

    data = np.array(np.random.rand(1000, 1000, 500), order='F')

    data = np.array(np.random.rand(300, 400, 1500), order='F')
    kernel = np.array([1, 2, 3, 4, 5])

    import ClearMap.Utils.Timer as tmr
    timer = tmr.Timer()
    for axis in range(3):
        corr = ap.correlate1d(data,
                              kernel,
                              axis=axis,
                              verbose=False,
                              processes=None)
    timer.print_elapsed_time('ap')

    import ClearMap.Utils.Timer as tmr
    timer = tmr.Timer()
    for axis in range(3):
        corr2 = ap2.correlate1d(data,
                                kernel,
                                axis=axis,
                                verbose=False,
                                processes=None)
    timer.print_elapsed_time('ap2')

    import scipy.ndimage as ndi
    timer = tmr.Timer()
    for axis in range(3):
        corr_ndi = ndi.correlate1d(data,
                                   kernel,
                                   axis=axis,
                                   mode='constant',
                                   cval=0)
    timer.print_elapsed_time('ndi')

    assert np.allclose(corr.array, corr_ndi)
    assert np.allclose(corr2.array, corr_ndi)

    # IO
    import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap
    import numpy as np
    from importlib import reload  # reload is not a builtin on Python 3
    reload(ap)

    data = np.random.rand(10, 200, 10)

    sink = ap.write('test.npy', data, verbose=True)
    assert (np.all(sink.array == data))

    read = ap.read('test.npy', verbose=True)
    assert (np.all(read.array == data))

    ap.io.delete_file('test.npy')

    # where
    reload(ap)
    data = np.random.rand(30, 20, 40) > 0.5

    where_np = np.array(np.where(data)).T
    where = ap.where(data, cutoff=2**0)

    check_np = np.zeros(data.shape, dtype=bool)
    check = np.zeros(data.shape, dtype=bool)
    check_np[tuple(where_np.T)] = True
    check[tuple(where.array.T)] = True
    assert (np.all(check_np == check))
Example n. 15
0
def skeletonize(source,
                sink=None,
                points=None,
                method='PK12i',
                steps=None,
                in_place=False,
                verbose=True,
                **kwargs):
    """Skeletonize 3d binary arrays.
  
  Arguments
  ---------
  source : array or source 
    Binary image to skeletonize.
  sink : sink specification
    Optional sink.
  points : array or None
    Optional point list of the foreground points in the binary.
  method : str
    'PK12' or faster index version 'PK12i'.
  steps : int or None
    Number of maximal iteration steps. If None, maximal thinning.
  in_place : bool
    If True, the skeletonization is done directly on the input array.
    
  Returns
  -------
  skeleton : Source
    The skeletonized array.
  """
    if verbose:
        timer = tmr.Timer()

    if not in_place and io.is_file(source):
        binary_buffer = ap.read(source).as_buffer()
    else:
        binary, binary_buffer = ap.initialize_source(source)
        if not in_place:
            binary_buffer = np.array(binary_buffer)

    if method == 'PK12':
        result = PK12.skeletonize(binary_buffer,
                                  points=points,
                                  steps=steps,
                                  verbose=verbose,
                                  **kwargs)
    elif method == 'PK12i':
        result = PK12.skeletonize_index(binary_buffer,
                                        points=points,
                                        steps=steps,
                                        verbose=verbose,
                                        **kwargs)
    else:
        raise RuntimeError('Skeletonization method %r is not valid!' % method)

    if verbose:
        timer.print_elapsed_time(head='Skeletonization')

    if sink is None:
        sink = ap.io.as_source(result)
    elif isinstance(sink, str):
        sink = ap.write(sink, result)
    else:
        sink = io.write(sink, result)
    return sink
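
# A hedged usage sketch for the wrapper above; the module alias skl is an
# assumption, matching the import style used elsewhere in these examples:
#
#   import numpy as np
#   import ClearMap.ImageProcessing.Skeletonization.Skeletonize as skl
#
#   binary = np.zeros((50, 50, 50), dtype=bool, order='F')
#   binary[10:40, 23:27, 23:27] = True      # a thick rod, border left empty
#   skeleton = skl.skeletonize(binary, method='PK12i', verbose=True)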
Example n. 16
0
def _test():
    import numpy as np
    import ClearMap.ImageProcessing.Topology.Topology3d as top

    from importlib import reload
    reload(top)

    label = top.cube_labeled()
    top.print_cube(label)

    # Test rotations
    c = np.zeros((3, 3, 3), dtype=bool)
    c[1, 0, 0] = True
    top.print_cube(c)

    cs = [top.rotate(c, axis=2, steps=r) for r in range(4)]
    [top.print_cube(cc) for cc in cs]

    reload(top)
    l = top.cube_labeled()
    rts = top.rotations6(l)

    [top.print_cube(r) for r in rts]

    reload(top)
    b = top.cube_from_index(6)
    i = top.cube_to_index(b)
    print(i, 6)

    us = np.zeros((3, 3, 3), dtype=int)
    us[1, 1, 2] = 1
    us[1, 0, 1] = 1
    us[1, 2, 0] = 2

    r12 = top.rotations12(us)
    [top.print_cube(cc) for cc in r12]

    #check configuration utility
    reload(top)
    index = 11607
    source = top.cube_from_index(index)

    c = top.index_from_binary(source)
    assert c[1, 1, 1] == index

    x = np.random.rand(1500, 500, 500) > 0.6
    c = top.index_from_binary(x)

    import numpy as np
    import ClearMap.ImageProcessing.Topology.Topology3d as top

    #check fortran vs c order
    x = np.random.rand(5, 5, 5) > 0.35
    y = np.asanyarray(x, order='F')

    ix = top.index_from_binary(x)
    iy = top.index_from_binary(y)

    ax = ix.array
    ay = iy.array

    #%% profile
    import io
    io.DEFAULT_BUFFER_SIZE = 2**32

    import pstats, cProfile

    import numpy as np
    import ClearMap.ImageProcessing.Topology.Topology3d as top

    x = np.ones((3000, 500, 1000), dtype=bool, order='F')

    import ClearMap.IO.IO as io
    import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap
    ap.write('test.npy', x)

    y = io.as_source('test.npy')
    z = io.create('result.npy', shape=y.shape, order='C', dtype='uint32')

    cProfile.runctx(
        "c =top.index_from_binary(y, method='!shared', sink=z, verbose=True, processes=None)",
        globals(), locals(), "Profile.prof")

    s = pstats.Stats("Profile.prof")
    s.strip_dirs().sort_stats("time").print_stats()

    import mmap
    mmap.ACCESS_COPY
Example n. 17
0
def skeletonize_index(binary,
                      points=None,
                      steps=None,
                      removals=False,
                      radii=False,
                      return_points=False,
                      check_border=True,
                      delete_border=False,
                      verbose=True):
    """Skeletonize a binary 3d array using PK12 algorithm via index coordinates.
  
  Arguments
  ---------
  binary : array
    Binary image to be skeletonized. 
  points : array or None
    Optional list of points in the binary to speed up processing.
  steps : int or None
    Number of maximal iteration steps. If None, use maximal reduction.
  removals : bool
    If True, returns the steps in which the pixels in the input data 
    were removed.
  radii : bool
    If True, the estimate of the local radius is returned.
  return_points : bool
    If True, return the point coordinates of the skeleton.
  check_border : bool
    If True, check that the border is empty. The algorithm requires this.
  delete_border : bool
    If True, delete the border.
  verbose : bool
    If True, print progress info.
    
  Returns
  -------
  skeleton : array
    The skeleton of the binary input.
  points : nxd array
    The point coordinates of the skeleton.
  """

    if verbose:
        print('#############################################################')
        print('Skeletonization PK12 [convolution, index]')
        timer = tmr.Timer()

    #TODO: make this work for any memmapable source
    if not isinstance(binary, np.ndarray):
        raise ValueError('Numpy array required for binary in skeletonization!')
    if binary.ndim != 3:
        raise ValueError('The binary array dimension is %d, 3 is required!' %
                         binary.ndim)

    if delete_border:
        binary = t3d.delete_border(binary)
        check_border = False

    if check_border:
        if not t3d.check_border(binary):
            raise ValueError(
                'The binary array needs to have no points on the border!')

    binary_flat = binary.reshape(-1, order='A')

    # detect points
    if points is None:
        points = ap.where(binary_flat).array
    npoints = points.shape[0]

    if verbose:
        timer.print_elapsed_time('Foreground points: %d' % (points.shape[0], ))

    if removals is True or radii is True:
        #birth = np.zeros(binary.shape, dtype = 'uint16');
        order = 'C'
        if binary.flags.f_contiguous:
            order = 'F'
        death = np.zeros(binary.shape, dtype='uint16', order=order)
        deathflat = death.reshape(-1, order='A')
        with_info = True
    else:
        with_info = False

    # iterate
    if steps is None:
        steps = -1
    step = 1
    nnonrem = 0
    while True:
        if verbose:
            print(
                '#############################################################'
            )
            print('Iteration %d' % step)
            timer_iter = tmr.Timer()

        #print(type(points), points.dtype, binary.dtype)  # debug
        border = cpl.convolve_3d_indices_if_smaller_than(
            binary, t3d.n6, points, 6)
        borderpoints = points[border]
        #borderids    = np.nonzero(border)[0];
        borderids = ap.where(border).array
        keep = np.ones(len(border), dtype=bool)
        if verbose:
            timer_iter.print_elapsed_time('Border points: %d' %
                                          (len(borderpoints), ))

        #if info is not None:
        #  b = birth[borderpoints[:,0], borderpoints[:,1], borderpoints[:,2]];
        #  bids = b == 0;
        #  birth[borderpoints[bids,0], borderpoints[bids,1], borderpoints[bids,2]] = step;

        # sub iterations
        remiter = 0
        for i in range(12):
            if verbose:
                print(
                    '-------------------------------------------------------------'
                )
                print('Sub-Iteration %d' % i)
                timer_sub_iter = tmr.Timer()

            remborder = delete[cpl.convolve_3d_indices(binary, rotations[i],
                                                       borderpoints)]
            rempoints = borderpoints[remborder]
            if verbose:
                timer_sub_iter.print_elapsed_time('Matched points  : %d' %
                                                  (len(rempoints), ))

            binary_flat[rempoints] = 0
            keep[borderids[remborder]] = False
            rem = len(rempoints)
            remiter += rem

            #death times
            if with_info is True:
                #remo = np.logical_not(keep);
                deathflat[rempoints] = 12 * step + i

            if verbose:
                timer_sub_iter.print_elapsed_time('Sub-Iteration %d' % (i, ))

        if verbose:
            print(
                '-------------------------------------------------------------'
            )

        #update foreground
        points = points[keep]

        if step % 3 == 0:
            npts = len(points)
            points = points[consider[cpl.convolve_3d_indices(
                binary, base, points)]]
            nnonrem += npts - len(points)
            if verbose:
                print('Non-removable points: %d' % (npts - len(points)))

        if verbose:
            print('Foreground points   : %d' % points.shape[0])

        if verbose:
            print(
                '-------------------------------------------------------------'
            )
            timer_iter.print_elapsed_time('Iteration %d' % (step, ))

        step += 1
        if steps >= 0 and step >= steps:
            break
        if remiter == 0:
            break

    if verbose:
        print('#############################################################')
        timer.print_elapsed_time('Skeletonization done')
        print('Total removed:   %d' % (npoints - (len(points) + nnonrem)))
        print('Total remaining: %d' % (len(points) + nnonrem))

    if radii is True or return_points is True:
        points = ap.where(binary_flat).array

    if radii is True:
        #calculate average diameter as the average death time of the neighbourhood
        radii = cpl.convolve_3d_indices(death,
                                        t3d.n18,
                                        points,
                                        out_dtype='uint16')
    else:
        radii = None

    result = [binary]
    if return_points:
        result.append(points)
    if removals is True:
        result.append(death)
    if radii is not None:
        result.append(radii)

    if len(result) > 1:
        return tuple(result)
    else:
        return result[0]
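
# Sketch of the border test performed in each PK12 iteration above: a
# foreground voxel is a border point when not all 6 of its face neighbours
# are foreground. cpl.convolve_3d_indices_if_smaller_than evaluates this on
# flat indices; the same test with plain scipy (n6 layout assumed here):
import numpy as np
import scipy.ndimage as ndi

binary = np.random.rand(20, 20, 20) > 0.4
n6 = np.zeros((3, 3, 3), dtype=int)  # 6-connectivity: the six face neighbours
n6[0, 1, 1] = n6[2, 1, 1] = n6[1, 0, 1] = n6[1, 2, 1] = n6[1, 1, 0] = n6[1, 1, 2] = 1

counts = ndi.convolve(binary.astype(int), n6, mode='constant', cval=0)
border = binary & (counts < 6)  # foreground voxels with an exposed face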
Example n. 18
0
def skeletonize(binary,
                points=None,
                steps=None,
                removals=False,
                radii=False,
                check_border=True,
                delete_border=False,
                return_points=False,
                verbose=True):
    """Skeletonize a binary 3d array using PK12 algorithm.
  
  Arguments
  ---------
  binary : array
    Binary image to skeletonize.
  points : array or None.
    Optional list of points in the binary to speed up processing.
  steps : int or None
    Number of maximal iteration steps (if None maximal reduction).
  removals : bool
    If True, also returns the steps at which the pixels in the input data 
    were removed. 
  radii : bool
    If True, the estimate of the local radius is returned.
  check_border : bool
    If True, check that the border is empty. The algorithm requires this.
  delete_border : bool
    If True, delete the border.
  verbose : bool
    If True print progress info.
    
  Returns
  -------
  skeleton : array
    The skeleton of the binary.
  points : array
    The point coordinates of the skeleton (n x 3).
  
  Note
  ----
  The skeletonization is done in place on the binary. Copy the binary if
  needed for further processing.
  """

    if verbose:
        print('#############################################################')
        print('Skeletonization PK12 [convolution]')
        timer = tmr.Timer()

    #TODO: make this work for any memmapable source !
    if not isinstance(binary, np.ndarray):
        raise ValueError('Numpy array required for binary in skeletonization!')
    if binary.ndim != 3:
        raise ValueError('The binary array dimension is %d, 3 is required!' %
                         binary.ndim)

    if delete_border:
        binary = t3d.delete_border(binary)
        check_border = False

    if check_border:
        if not t3d.check_border(binary):
            raise ValueError(
                'The binary array needs to have no points on the border!')

    # detect points
    #points = np.array(np.nonzero(binary)).T;
    if points is None:
        points = ap.where(binary).array

    if verbose:
        timer.print_elapsed_time(head='Foreground points: %d' %
                                 (points.shape[0], ))

    if removals is True or radii is True:
        #birth = np.zeros(binary.shape, dtype = 'uint16');
        death = np.zeros(binary.shape, dtype='uint16')
        with_info = True
    else:
        with_info = False

    # iterate
    if steps is None:
        steps = -1
    step = 1
    removed = 0
    while True:
        if verbose:
            print(
                '#############################################################'
            )
            print('Iteration %d' % step)
            timer_iter = tmr.Timer()

        border = cpl.convolve_3d_points(binary, t3d.n6, points) < 6
        borderpoints = points[border]
        borderids = np.nonzero(border)[0]
        keep = np.ones(len(border), dtype=bool)
        if verbose:
            timer_iter.print_elapsed_time('Border points: %d' %
                                          (len(borderpoints), ))

        #if info is not None:
        #  b = birth[borderpoints[:,0], borderpoints[:,1], borderpoints[:,2]];
        #  bids = b == 0;
        #  birth[borderpoints[bids,0], borderpoints[bids,1], borderpoints[bids,2]] = step;

        # sub iterations
        remiter = 0
        for i in range(12):
            if verbose:
                print(
                    '-------------------------------------------------------------'
                )
                print('Sub-Iteration %d' % i)
                timer_sub_iter = tmr.Timer()

            remborder = delete[cpl.convolve_3d_points(binary, rotations[i],
                                                      borderpoints)]
            rempoints = borderpoints[remborder]
            if verbose:
                timer_sub_iter.print_elapsed_time('Matched points: %d' %
                                                  (len(rempoints), ))

            binary[rempoints[:, 0], rempoints[:, 1], rempoints[:, 2]] = 0
            keep[borderids[remborder]] = False
            rem = len(rempoints)
            remiter += rem
            removed += rem
            if verbose:
                print('Deleted points: %d' % (rem))
                timer_sub_iter.print_elapsed_time('Sub-Iteration %d' % (i))

            #death times
            if with_info is True:
                #remo = np.logical_not(keep);
                death[rempoints[:, 0], rempoints[:, 1],
                      rempoints[:, 2]] = 12 * step + i

        #update foreground
        points = points[keep]
        if verbose:
            print('Foreground points: %d' % points.shape[0])

        if verbose:
            print(
                '-------------------------------------------------------------'
            )
            timer_iter.print_elapsed_time('Iteration %d' % (step, ))

        step += 1
        if steps >= 0 and step >= steps:
            break
        if remiter == 0:
            break

    if verbose:
        print('#############################################################')
        print('Total removed:   %d' % (removed))
        print('Total remaining: %d' % (len(points)))
        timer.print_elapsed_time('Skeletonization')

    result = [binary]
    if return_points:
        result.append(points)
    if removals is True:
        result.append(death)
    if radii is True:
        #calculate average diameter as average death of neighbourhood
        radii = cpl.convolve_3d(death, np.array(t3d.n18, dtype='uint16'),
                                points)
        result.append(radii)

    if len(result) > 1:
        return tuple(result)
    else:
        return result[0]
Example n. 19
0
def fill_vessels(source, sink, 
                 resample = None, threshold = 0.5,
                 network = None, dtype = 'float16', cuda = None,
                 processing_parameter = None,
                 verbose = False):
  """Fill hollow tubes via a neural network.
  
  Arguments
  ---------
  source : str or Source
    The binary data source to fill hollow tubes in.
  sink : str or Source.
    The binary sink to write data to. sink is created if it does not exist.
  resample : int or None
    If int, downsample the data by this factor, apply network and upsample.
  threshold : float or None
    Apply a threshold to the result of the cnn. If None, the probability of
    being foreground is returned.
  network : str, Model or None
    The network specification. If None, the default trained network is used.
  dtype : str
    The dtype to use for the network. See 
    :func:`ClearMap.ImageProcessing.MachineLearning.Torch.to` for details.
  cuda : bool or None
    If True, use gpu processing. If None, automatically detect gpu.
  processing_parameter : dict or None
    Parameter to use for block processing.
  verbose : bool
    If True, print progress.
  
  Returns
  -------
  network : Model
    The neural network model.
  """
  if verbose:
    timer = tmr.Timer();

  #cuda
  if cuda is None:
    cuda = torch.cuda.is_available();
    
  #initialize network
  network = vessel_filling_network(network=network, dtype=dtype, cuda=cuda);
  if not cuda:  #some functions only work as float on CPU
    network = network.float();
  if verbose:
    timer.print_elapsed_time('Vessel filling: neural network initialized')
    print(network);
    print('Vessel filling: using %s' % (('gpu' if cuda else 'cpu'),))
  
  #initialize source
  source = io.as_source(source);
 
  if verbose:
    timer.print_elapsed_time('Vessel filling: source loaded');
    
  #initialize sink
  if threshold:
    sink_dtype = bool;
  else:
    sink_dtype = dtype;
  sink, sink_shape = ap.initialize_sink(sink=sink, shape=source.shape, dtype=sink_dtype, order=source.order, return_buffer=False, return_shape=True);
  
  #resampling
  if resample is not None:
    maxpool = torch.nn.MaxPool3d(kernel_size=resample)
    upsample = torch.nn.Upsample(mode="trilinear", scale_factor=resample, align_corners=False);
    
    if cuda:
      maxpool = maxpool.cuda();
      upsample = upsample.cuda();
      if dtype is not None:
        maxpool  = tor.to(maxpool, dtype);
        upsample = tor.to(upsample, dtype);
    else:
      maxpool = maxpool.float();
      upsample = upsample.float();
  
  #processing
  if processing_parameter is None:
    processing_parameter = default_fill_vessels_processing_parameter
  if processing_parameter:
    processing_parameter = processing_parameter.copy();
    processing_parameter.update(optimization=False);
    if 'size_max' not in processing_parameter or processing_parameter['size_max'] is None:
      processing_parameter['size_max'] = np.max(source.shape);
    if 'size_min' not in processing_parameter:
      processing_parameter['size_min'] = None;    
    blocks = bp.split_into_blocks(source, **processing_parameter);  
  else:
    blocks = [source];
  
  #process blocks
  for block in blocks:
    if verbose:
      timer_block = tmr.Timer();
      print('Vessel filling: processing block %s' % (block.info()));
    
    #load data
    data = np.array(block.array);
    if data.dtype == bool:
      data = data.astype('uint8');
    data = torch.unsqueeze(torch.from_numpy(data), 0)
    if cuda:
      data = tor.to(data, dtype=dtype);
      data = data.cuda();
    else:
      data = data.float();
    if verbose:
      print('Vessel filling: loaded data: %r' % (tuple(data.shape),));
    
    #downsample
    if resample:        
      result = maxpool(data);
    else:
      result = data; 
    result = torch.unsqueeze(result, 1);  
    if verbose:
      print('Vessel filling: resampled data: %r' % (tuple(result.shape),));
    
    #fill
    result = network(result);
    if verbose:
      print('Vessel filling: network %r' % (tuple(result.shape),));
    
    #upsample
    if resample:
      result = upsample(result)
    if verbose:
      print('Vessel filling: upsampled %r' % (tuple(result.shape),));
      
    #write data
    sink_slicing = block.slicing;
    result_shape = result.shape;
    result_slicing = tuple(slice(None,min(ss, rs)) for ss,rs in zip(sink_shape, result_shape[2:]));
    data_slicing = (0,) + tuple(slice(None, s.stop) for s in result_slicing);
    sink_slicing = bp.blk.slc.sliced_slicing(result_slicing, sink_slicing, sink_shape);  
    result_slicing = (0,0) + result_slicing;

    #print('result', result.shape, result_slicing, 'data', data.shape, data_slicing, 'sink', sink_shape, sink_slicing)
    
    if threshold:
      sink_prev = torch.from_numpy(np.asarray(sink[sink_slicing], dtype='uint8'));
    else:
      sink_prev = torch.from_numpy(sink[sink_slicing]);
    
    if cuda:
      sink_prev = sink_prev.cuda();
      sink_prev = tor.to(sink_prev, dtype=dtype);
    else:
      sink_prev  = sink_prev.float();

    #print('slices:', result[result_slicing].shape, data[data_slicing].shape, sink_prev.shape)
    
    result = torch.max(torch.max(result[result_slicing], data[data_slicing]), sink_prev);
    if threshold:
      result = result >= threshold;
    if verbose:
      print('Vessel filling: thresholded %r' % (tuple(result.shape),));
    
    if cuda:
      sink[sink_slicing] = result.data.cpu();
    else:
      sink[sink_slicing] = result.data;
    
    if verbose:
      print('Vessel filling: result written to %r' % (sink_slicing,));

    del data, result, sink_prev;
    gc.collect();
    
    if verbose:
      timer_block.print_elapsed_time('Vessel filling: processing block %s' % (block.info()));
   
  if verbose:
    timer.print_elapsed_time('Vessel filling');
  
  return sink;
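
# The resample branch above shrinks each block with max pooling before the
# network and scales the result back up afterwards. A minimal standalone
# sketch of that round trip (shapes only, no ClearMap dependencies):
import torch

resample = 2
maxpool = torch.nn.MaxPool3d(kernel_size=resample)
upsample = torch.nn.Upsample(mode='trilinear', scale_factor=resample,
                             align_corners=False)

x = torch.rand(1, 1, 32, 32, 32)  # (batch, channel, z, y, x)
down = maxpool(x)                 # -> (1, 1, 16, 16, 16)
up = upsample(down)               # -> (1, 1, 32, 32, 32)
assert up.shape == x.shape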
Example n. 20
0
def detect_cells_block(source, parameter=default_cell_detection_parameter):
    """Detect cells in a Block."""

    #initialize parameter and slicings
    verbose = parameter.get('verbose', False)
    if verbose:
        prefix = 'Block %s: ' % (source.info(), )
        total_time = tmr.Timer(prefix)

    base_slicing = source.valid.base_slicing
    valid_slicing = source.valid.slicing
    valid_lower = source.valid.lower
    valid_upper = source.valid.upper
    lower = source.lower

    parameter_intensity = parameter.get('intensity_detection', None)
    measure_to_array = dict()
    if parameter_intensity:
        parameter_intensity = parameter_intensity.copy()
        measure = parameter_intensity.pop('measure', [])
        if measure is None:
            measure = []
        for m in measure:
            measure_to_array[m] = None

    if 'source' in measure_to_array:
        measure_to_array['source'] = source

    # correct illumination
    parameter_illumination = parameter.get('illumination_correction', None)
    if parameter_illumination:
        parameter_illumination = parameter_illumination.copy()
        if verbose:
            timer = tmr.Timer(prefix)
            hdict.pprint(parameter_illumination,
                         head=prefix + 'Illumination correction')
        save = parameter_illumination.pop('save', None)

        corrected = ic.correct_illumination(source, **parameter_illumination)

        if save:
            save = io.as_source(save)
            save[base_slicing] = corrected[valid_slicing]

        if verbose:
            timer.print_elapsed_time('Illumination correction')
    else:
        corrected = np.array(source.array)

    if 'illumination' in measure_to_array:
        measure_to_array['illumination'] = corrected

    #background subtraction
    parameter_background = parameter.get('background_correction', None)
    if parameter_background:
        parameter_background = parameter_background.copy()
        if verbose:
            timer = tmr.Timer(prefix)
            hdict.pprint(parameter_background,
                         head=prefix + 'Background removal')
        save = parameter_background.pop('save', None)

        background = remove_background(corrected, **parameter_background)

        if save:
            save = io.as_source(save)
            save[base_slicing] = background[valid_slicing]

        if verbose:
            timer.print_elapsed_time('Background removal')
    else:
        background = corrected

    del corrected

    if 'background' in measure_to_array:
        measure_to_array['background'] = background

    # equalize
    parameter_equalize = parameter.get('equalization', None)
    if parameter_equalize:
        parameter_equalize = parameter_equalize.copy()
        if verbose:
            timer = tmr.Timer(prefix)
            hdict.pprint(parameter_equalize, head=prefix + 'Equalization:')

        save = parameter_equalize.pop('save', None)

        equalized = equalize(background, mask=None, **parameter_equalize)

        if save:
            save = io.as_source(save)
            save[base_slicing] = equalized[valid_slicing]

        if verbose:
            timer.print_elapsed_time('Equalization')

    else:
        equalized = background

    del background

    if 'equalized' in measure_to_array:
        measure_to_array['equalized'] = equalized

    #DoG filter
    parameter_dog_filter = parameter.get('dog_filter', None)
    if parameter_dog_filter:
        parameter_dog_filter = parameter_dog_filter.copy()
        if verbose:
            timer = tmr.Timer(prefix)
            hdict.pprint(parameter_dog_filter, head=prefix + 'DoG filter:')

        save = parameter_dog_filter.pop('save', None)

        dog = dog_filter(equalized, **parameter_dog_filter)

        if save:
            save = io.as_source(save)
            save[base_slicing] = dog[valid_slicing]

        if verbose:
            timer.print_elapsed_time('DoG filter')

    else:
        dog = equalized

    del equalized

    if 'dog' in measure_to_array:
        measure_to_array['dog'] = dog

    #Maxima detection
    parameter_maxima = parameter.get('maxima_detection', None)
    parameter_shape = parameter.get('shape_detection', None)

    if parameter_shape or parameter_intensity:
        if not parameter_maxima:
            print(
                prefix +
                'Warning: maxima detection needed for shape and intensity detection!'
            )
            parameter_maxima = dict()

    if parameter_maxima:
        parameter_maxima = parameter_maxima.copy()
        if verbose:
            timer = tmr.Timer(prefix)
            hdict.pprint(parameter_maxima, head=prefix + 'Maxima detection:')

        save = parameter_maxima.pop('save', None)
        valid = parameter_maxima.pop('valid', None)

        # extended maxima
        maxima = md.find_maxima(source.array,
                                **parameter_maxima,
                                verbose=verbose)

        if save:
            save = io.as_source(save)
            save[base_slicing] = maxima[valid_slicing]

        #center of maxima
        if parameter_maxima['h_max']:
            centers = md.find_center_of_maxima(source,
                                               maxima=maxima,
                                               verbose=verbose)
        else:
            centers = ap.where(maxima).array

        if verbose:
            timer.print_elapsed_time('Maxima detection')

        #correct for valid region
        if valid:
            ids = np.ones(len(centers), dtype=bool)
            for c, l, u in zip(centers.T, valid_lower, valid_upper):
                ids = np.logical_and(ids, np.logical_and(l <= c, c < u))
            centers = centers[ids]
            del ids

    del dog, maxima

    results = (centers, )

    #cell shape detection
    if parameter_shape:
        parameter_shape = parameter_shape.copy()
        if verbose:
            timer = tmr.Timer(prefix)
            hdict.pprint(parameter_shape, head=prefix + 'Shape detection:')

        save = parameter_shape.pop('save', None)

        # shape detection
        shape = sd.detect_shape(source,
                                centers,
                                **parameter_shape,
                                verbose=verbose)

        if save:
            save = io.as_source(save)
            save[base_slicing] = shape[valid_slicing]

        #size detection
        max_label = centers.shape[0]
        sizes = sd.find_size(shape, max_label=max_label)
        valid = sizes > 0

        if verbose:
            timer.print_elapsed_time('Shape detection')

        results += (sizes, )

    else:
        valid = None
        shape = None

    #cell intensity detection
    if parameter_intensity:
        parameter_intensity = parameter_intensity.copy()
        if verbose:
            timer = tmr.Timer(prefix)
            hdict.pprint(parameter_intensity,
                         head=prefix + 'Intensity detection:')

        if shape is not None:
            r = parameter_intensity.pop('shape', 3)
            if isinstance(r, tuple):
                r = r[0]

        for m in measure:
            if shape is not None:
                intensity = sd.find_intensity(measure_to_array[m],
                                              label=shape,
                                              max_label=max_label,
                                              **parameter_intensity)
            else:
                intensity = me.measure_expression(measure_to_array[m],
                                                  centers,
                                                  search_radius=r,
                                                  **parameter_intensity,
                                                  processes=1,
                                                  verbose=False)

            results += (intensity, )

        if verbose:
            timer.print_elapsed_time('Intensity detection')

    if valid is not None:
        results = tuple(r[valid] for r in results)

    #correct coordinate offsets of blocks
    results = (results[0] + lower, ) + results[1:]

    #correct shapes for merging
    results = tuple(r[:, None] if r.ndim == 1 else r for r in results)

    if verbose:
        total_time.print_elapsed_time('Cell detection')

    gc.collect()

    return results
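
# The parameter dict consumed by detect_cells_block is keyed by processing
# step; a step is skipped when its entry is missing or falsy. A hedged sketch
# of the structure, using only keys the function actually reads (the values
# are illustrative, not ClearMap defaults):
parameter = {
    'verbose': True,
    'illumination_correction': None,           # None/missing -> step skipped
    'background_correction': {'save': None},   # remaining keys are passed on
    'equalization': None,
    'dog_filter': {'save': None},
    'maxima_detection': {'save': None, 'valid': True, 'h_max': None},
    'shape_detection': {'save': None},
    'intensity_detection': {'measure': ['source'], 'shape': 3},
}
# results = detect_cells_block(block, parameter=parameter)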
Example n. 21
0
def _test():
    import numpy as np
    import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap

    ## Lookup table processing

    #apply_lut
    x = np.random.randint(0, 100, size=(20, 30))
    lut = np.arange(100) + 1
    y = ap.apply_lut(x, lut)
    assert np.all(y == x + 1)

    #apply_lut_to_index
    import ClearMap.ImageProcessing.Topology.Topology3d as t3d
    kernel = t3d.index_kernel(dtype=int)

    import ClearMap.ImageProcessing.Binary.Smoothing as sm
    lut = sm.initialize_lookup_table()

    data = np.array(np.random.rand(150, 30, 40) > 0.75, order='F')

    result = ap.apply_lut_to_index(data, kernel, lut, sink=None, verbose=True)

    import ClearMap.Visualization.Plot3d as p3d
    p3d.plot([[data, result]])

    ### Correlation

    #correlate1d
    kernel = np.array(range(11), dtype='uint32')
    data = np.array(np.random.randint(0,
                                      2**27, (300, 400, 1500),
                                      dtype='uint32'),
                    order='F')
    #data = np.array(np.random.rand(3,4,5), order='F');

    data = np.empty((300, 400, 1500), order='F')
    kernel = np.array([1, 2, 3, 4, 5], dtype='uint8')

    sink = 'test.npy'

    import ClearMap.Utils.Timer as tmr
    import scipy.ndimage as ndi
    timer = tmr.Timer()
    for axis in range(3):
        print(axis)
        corr_ndi = ndi.correlate1d(data, kernel, axis=axis, mode='constant', cval=0)
    timer.print_elapsed_time('ndi')

    timer = tmr.Timer()
    for axis in range(3):
        print(axis)
        corr = ap.correlate1d(data,
                              sink=sink,
                              kernel=kernel,
                              axis=axis,
                              verbose=False,
                              processes=None)
    timer.print_elapsed_time('ap')

    assert np.allclose(corr.array, corr_ndi)

    # IO
    import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap
    import numpy as np
    from importlib import reload  # reload is not a builtin on Python 3
    reload(ap)

    data = np.random.rand(10, 200, 10)

    sink = ap.write('test.npy', data, verbose=True)
    assert (np.all(sink.array == data))

    read = ap.read('test.npy', verbose=True)
    assert (np.all(read.array == data))

    ap.io.delete_file('test.npy')

    # where
    reload(ap)
    data = np.random.rand(30, 20, 40) > 0.5

    where_np = np.array(np.where(data)).T
    where = ap.where(data, cutoff=2**0)

    check_np = np.zeros(data.shape, dtype=bool)
    check = np.zeros(data.shape, dtype=bool)
    check_np[tuple(where_np.T)] = True
    check[tuple(where.array.T)] = True
    assert (np.all(check_np == check))
Example n. 22
0
def index_from_binary(source,
                      sink=None,
                      method='shared',
                      dtype='uint32',
                      processes=None,
                      verbose=False):
    """Calculate the local 3x3x3 configuration in a binary source.
  
  Note
  ----
  The configuration kernel is separable and convolution with it 
  is calculated via a sequence of 1d convolutions.
  """
    processes, timer = ap.initialize_processing(processes=processes,
                                                verbose=verbose,
                                                function='index_from_binary')

    #determine configuration
    source, source_buffer, source_shape, source_strides, source_order = ap.initialize_source(
        source,
        as_1d=True,
        return_shape=True,
        return_strides=True,
        return_order=True)
    ndim = len(source_shape)

    buffer_dtype = np.result_type(source_buffer.dtype, 'uint32')

    delete_files = []
    if source_order == 'C':
        axis_range = range(ndim - 1, -1, -1)
        axis_last = 0
    else:
        axis_range = range(ndim)
        axis_last = ndim - 1
    for axis in axis_range:
        if axis == axis_last:
            sink, sink_buffer, sink_shape, sink_strides = ap.initialize_sink(
                sink=sink,
                as_1d=True,
                source=source,
                dtype=dtype,
                return_shape=True,
                return_strides=True)
        else:
            if method == 'shared':
                _, sink_buffer, sink_shape, sink_strides = ap.initialize_sink(
                    sink=None,
                    as_1d=True,
                    shape=source_shape,
                    dtype=buffer_dtype,
                    order=source_order,
                    return_shape=True,
                    return_strides=True)
            else:
                location = tempfile.mktemp() + '.npy'
                _, sink_buffer, sink_shape, sink_strides = ap.initialize_sink(
                    sink=location,
                    as_1d=True,
                    shape=tuple(source_shape),
                    dtype=buffer_dtype,
                    order=source_order,
                    return_shape=True,
                    return_strides=True)
                delete_files.append(location)

        kernel = index_kernel(axis=axis, dtype=float)

        #print(source_buffer.dtype, source_buffer.shape, source_shape, source_strides, axis, sink_buffer.shape, sink_buffer.dtype, sink_strides, kernel.dtype)
        ap.code.correlate_1d(source_buffer, source_shape, source_strides,
                             sink_buffer, sink_shape, sink_strides, kernel,
                             axis, processes)
        source_buffer = sink_buffer

    for f in delete_files:
        io.delete_file(f)

    ap.finalize_processing(verbose=verbose,
                           function='index_from_binary',
                           timer=timer)

    return sink
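
# Why the configuration kernel in index_from_binary is separable: the 3x3x3
# index sums b * 2**n over the 27 neighbours, and with bit n = i + 3*j + 9*k
# for offsets (i, j, k) in {0, 1, 2} the weight factors into
# 2**i * 8**j * 512**k, i.e. three 1d kernels. Sketch (the bit ordering is
# illustrative; ClearMap's index_kernel may assign bits differently):
import numpy as np
import scipy.ndimage as ndi

x = np.ones((3, 3, 3), dtype='uint32')  # fully set neighbourhood
index = x
for axis, base in zip(range(3), (2, 8, 512)):
    kernel = np.array([1, base, base**2], dtype='uint32')
    index = ndi.correlate1d(index, kernel, axis=axis, mode='constant', cval=0)
assert index[1, 1, 1] == 2**27 - 1  # all 27 configuration bits set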
Example n. 23
0
 def read(self, *args, **kwargs):
  return ap.read(self.filename(*args, **kwargs));