Example no. 1
def _test():
  """Tests."""
  import numpy as np
  import ClearMap.Visualization.Plot3d as p3d
  import ClearMap.Tests.Files as tsf
  import ClearMap.ImageProcessing.Experts.Vasculature as vasc
  
  source = np.array(tsf.source('vls')[:300,:300,80:120]);
  source[:,:,[0,-1]] = 0;
  source[:,[0,-1],:] = 0;
  source[[0,-1],:,:] = 0;
    
  bpar = vasc.default_binarization_parameter.copy();
  bpar['clip']['clip_range'] = (150, 7000)
  bpar['as_memory'] = True
  #bpar['binary_status'] = 'binary_status.npy'
  
  ppar = vasc.default_processing_parameter.copy();
  ppar['processes'] = 10;
  ppar['size_max'] = 10;
  
  sink='binary.npy'
  #sink=None;
  
  binary = vasc.binarize(source, sink=sink, binarization_parameter=bpar, processing_parameter=ppar)
  p3d.plot([source, binary])

  import ClearMap.IO.IO as io
  io.delete_file(sink)
  
  pppar = vasc.default_postprocessing_parameter.copy();
  pppar['smooth']['iterations'] = 3;
  smoothed = vasc.postprocess(binary, postprocessing_parameter=pppar)
  p3d.plot([binary, smoothed])
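For reference, the parameter keys touched in this example nest as sketched below. This shows only the subset used here; the shipped default dictionaries (default_binarization_parameter etc.) contain entries for the other pipeline steps, and the dict names and comments below just reflect how the keys are used above.

# Subset of parameter keys used in the example (the defaults contain more entries):
binarization_keys = {
    'clip': {'clip_range': (150, 7000)},  # intensity clipping range
    'as_memory': True,                    # keep intermediate results in memory
}
processing_keys = {
    'processes': 10,  # number of parallel processes
    'size_max': 10,   # maximal block size along the processing axis
}
postprocessing_keys = {
    'smooth': {'iterations': 3},  # smoothing iterations (typically 6 for vasculature)
}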
Example no. 2
def _test():
    import numpy as np
    import ClearMap.Alignment.Annotation as ano
    #reload(ano)

    points = np.array([[162, 200, 138], [246, 486, 138], [246, 486, 138]])

    label = ano.label_points(points)
    print(label)

    cnts = ano.count_points(points)
    print(cnts)

    cnts = ano.count_points(points, hierarchical=False)
    print(cnts)

    import ClearMap.IO.IO as io
    ano.write_color_annotation('test.tif')
    io.delete_file('test.tif')

    l = ano.find(247, key='id')
    print(l)
    l.info(with_children=True)
    print(l.level)
Example no. 3
def _test():
    import numpy as np
    import ClearMap.IO.IO as io
    import ClearMap.ParallelProcessing.BlockProcessing as bp

    source = io.as_source(np.asarray(np.random.rand(50, 100, 200), order='F'))

    blocks = bp.split_into_blocks(source,
                                  processes=10,
                                  axes=[2],
                                  size_min=30,
                                  size_max=50,
                                  overlap=20)
    print(blocks)

    b = blocks[0]
    print(b.valid.base_shape)
    print(b.valid.base_slicing)
    print(blocks[5].iteration)

    blocks = bp.split_into_blocks(source,
                                  processes=10,
                                  axes=[1, 2],
                                  size_min=30,
                                  size_max=50,
                                  overlap=20,
                                  neighbours=True)
    b = blocks[0]
    print(b.valid.base_shape)
    print(b.valid.base_slicing)

    blocks = bp.split_into_blocks(source,
                                  processes=10,
                                  axes=[1, 2],
                                  size_min='fixed',
                                  size_max=50,
                                  overlap=20,
                                  neighbours=True)
    b = blocks[0]
    print(b.valid.base_shape)
    print(b.valid.base_slicing)

    shape = (2, 3, 20)
    source = io.npy.Source(array=np.random.rand(*shape))
    sink = io.npy.Source(array=np.zeros(shape))

    def process_image(source, sink=None):
        if sink is None:
            sink = np.zeros(source.shape)
        sink[:] = 100 * source[:]
        return sink

    bp.process(process_image,
               source,
               sink,
               processes='serial',
               size_max=4,
               size_min=1,
               overlap=0,
               axes=[2],
               optimization=True,
               verbose=True)

    print(np.all(sink[:] == process_image(source)))

    bp.process(process_image,
               source,
               sink,
               processes=None,
               size_max=10,
               size_min=6,
               overlap=3,
               axes=all,
               optimization=True,
               verbose=True)

    assert (np.all(sink[:] == process_image(source)))

    result, blocks = bp.process(process_image,
                                source,
                                sink,
                                size_max=15,
                                size_min=4,
                                overlap=3,
                                axes=[2],
                                optimization=True,
                                return_blocks=True,
                                processes=None,
                                verbose=True)

    # memory-mapped sources and sinks
    source = io.mmp.create(location='source.npy', shape=shape)
    source[:] = np.random.rand(*shape)
    sink = io.mmp.create(location='sink.npy', shape=shape)

    bp.process(process_image,
               source,
               sink,
               size_max=10,
               size_min=6,
               overlap=3,
               axes=[2],
               optimization=True,
               as_memory=True,
               verbose=True,
               processes=None)

    assert (np.all(sink[:] == process_image(source)))

    io.delete_file(source.location)
    io.delete_file(sink.location)

    #multiple sources and sinks
    shape = (2, 50, 30)
    source1 = io.sma.Source(array=np.random.rand(*shape))
    source2 = io.sma.Source(array=np.random.rand(*shape))
    sink1 = io.sma.Source(array=np.zeros(shape))
    sink2 = io.sma.Source(array=np.zeros(shape))

    def sum_and_difference(source1, source2, sink1=None, sink2=None):
        if sink1 is None:
            sink1 = np.zeros(source1.shape)
        if sink2 is None:
            sink2 = np.zeros(source2.shape)

        sink1[:] = source1[:] + source2[:]
        sink2[:] = source1[:] - source2[:]
        return sink1, sink2

    bp.process(sum_and_difference, [source1, source2], [sink1, sink2],
               processes='!serial',
               size_max=10,
               size_min=5,
               overlap=3,
               axes=[1, 2],
               optimization=True,
               verbose=True)

    s, d = sum_and_difference(source1, source2)
    assert (np.all(sink1[:] == s))
    assert (np.all(sink2[:] == d))

    #trace backs
    shape = (3, 4)
    source = io.sma.Source(array=np.random.rand(*shape))
    sink = io.sma.Source(array=np.zeros(shape))

    def raise_error(source, sink=None):
        raise RuntimeError('test')

    bp.process(raise_error,
               source,
               sink,
               processes='!serial',
               size_max=10,
               size_min=5,
               overlap=0,
               axes=[2],
               optimization=True,
               verbose=True)
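The assertions above hold because each block carries an overlap, so a neighbourhood operation computed per block agrees with the full-array result on the block's valid core, which is then written back. A minimal NumPy sketch of that idea (hand-rolled slicing for illustration, not the ClearMap block API):

import numpy as np

def box_filter_1d(x):
    # 3-point moving sum along the last axis, zero-padded at the ends
    padded = np.pad(x, ((0, 0), (1, 1)), mode='constant')
    return padded[:, :-2] + padded[:, 1:-1] + padded[:, 2:]

data = np.random.rand(4, 20)
full = box_filter_1d(data)

# split the last axis into blocks of size 10 with one pixel of overlap
result = np.zeros_like(data)
for start in range(0, data.shape[1], 10):
    lo, hi = max(start - 1, 0), min(start + 10 + 1, data.shape[1])
    block = box_filter_1d(data[:, lo:hi])
    # write back only the valid core, dropping the overlap again
    result[:, start:start + 10] = block[:, start - lo:start - lo + 10]

assert np.allclose(result, full)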
Example no. 4
def resample_inverse(source, sink = None, 
                     resample_source = None, resample_sink = None,
                     orientation = None, 
                     source_shape = None, source_resolution = None, 
                     sink_shape = None, sink_resolution = None, 
                     axes_order = None, method = 'memmap',
                     interpolation = 'linear', 
                     processes = None, verbose = True, **args):
  """Resample data inversely to :func:`resample` routine.
  
  Arguments
  ---------
  source : str, array
    Source to be inversely resampled (e.g. the sink of :func:`resample`).
  sink : str or None
    Sink to write the inversely resampled image to.
  resample_source : str, array or None
    Optional source in :func:`resample`.
  resample_sink : str, array or None
    Optional sink used in :func:`resample`.
  orientation : tuple
    Orientation as specified in :func:`resample`.
  source_shape : tuple or None
    Optional value of source_shape as in :func:`resample`.
  source_resolution : tuple or None
    Optional value of source_resolution as in :func:`resample`.
  sink_shape : tuple or None
    Optional value of sink_shape as in :func:`resample`.
  sink_resolution : tuple or None
    Optional value of sink_resolution as in :func:`resample`.
  axes_order : list of tuples of int or None
    The axes pairs along which to resample the data as in :func:`resample`.
  method : 'shared' or 'memmap'
    Method to handle intermediate resampling results. If 'shared' use shared 
    memory, otherwise use a memory map on disk.
  interpolation : str
    Method to use for interpolating to the resampled image.
  processes : int or None
    Number of processes to use for parallel resampling.
  verbose : bool
    If True, print progress information.
   
  Returns
  -------
  resampled : array or str
     Data or file name of inversely resampled image.

  Notes
  -----
  * All arguments except source and sink should be passed as in :func:`resample`
    to invert the resampling.
  """   
  source = io.as_source(source);
  ndim = source.ndim;
  dtype = source.dtype;
  
  #orientation
  orientation = format_orientation(orientation);
  orientation_inverse = inverse_orientation(orientation);
  
  #original source info
  if source_shape is None:
    if source_resolution is None and resample_source is None:
      raise ValueError('Either source_shape, source_resolution or resample_source must to be given!')
    if resample_source is not None:
      source_shape = io.shape(resample_source);
  
  #original sink info
  if sink_shape is None and sink_resolution is None: 
    if resample_sink is None:
      sink_shape = io.shape(source);
    else:
      sink_shape = io.shape(resample_sink);
  
  source_shape, sink_shape, source_resolution, sink_resolution = \
      resample_shape(source_shape=source_shape, sink_shape=sink_shape, 
                     source_resolution=source_resolution, sink_resolution=sink_resolution, 
                     orientation=orientation);
  
  sink_shape_in_source_orientation = orient_shape(sink_shape, orientation, inverse=True);
  
  axes_order, shape_order = _axes_order(axes_order, source, source_shape, sink_shape_in_source_orientation);
 
  interpolation = _interpolation_to_cv2(interpolation);                                   

  if not isinstance(processes, int) and processes != 'serial':
    processes = io.mp.cpu_count();
  
  #reversed orientation
  if orientation is not None:
    #reverse axes
    slicing = [slice(None)] * ndim;
    reslice = False;
    for d,o in enumerate(orientation):
      if o < 0:
        slicing[d] = slice(None, None, -1);
        reslice = True;
    if reslice:
      source = source[slicing];   
    
    #re-orient
    per = orientation_to_permuation(orientation_inverse);
    source = io.read(source);
    source = source.transpose(per);
    source = io.sma.as_shared(source);
 
  #reverse resampling steps
  axes_order = axes_order[::-1];
  
  shape_order = shape_order[:-1];
  shape_order = shape_order[::-1];
  shape_order = shape_order + [source_shape]
  #print(axes_order, shape_order)
  
  #reverse resampling
  n_steps = len(axes_order);
  last_source = source;
  delete_files = [];
  #print(last_source)
  for step, axes, shape in zip(range(n_steps), axes_order, shape_order):
    if step == n_steps-1:
      resampled = io.initialize(source=sink, shape=shape, dtype=dtype, memory='shared', as_source=True); 
    else:
      if method == 'shared':
        resampled = io.sma.create(shape, dtype=dtype, order='C', as_source=True);
      else:
        location = tempfile.mktemp() + '.npy';
        resampled = io.mmp.create(location, shape=shape, dtype=dtype, order='C', as_source=True);
        delete_files.append(location);

    #indices for non-resampled axes
    indices = tuple([range(s) for d,s in enumerate(shape) if d not in axes]);
    indices = [i for i in itertools.product(*indices)];
    n_indices = len(indices);
    
    #resample step
    last_source_virtual = last_source.as_virtual();
    resampled_virtual = resampled.as_virtual();
    _resample = ft.partial(_resample_2d, source=last_source_virtual, sink=resampled_virtual, axes=axes, shape=shape, 
                                         interpolation=interpolation, n_indices=n_indices, verbose=verbose)                       
    
    if processes == 'serial': 
      for index in indices:
        _resample(index=index);
    else:
      with concurrent.futures.ProcessPoolExecutor(processes) as executor:
        executor.map(_resample, indices);
        
    last_source = resampled;
  
  for f in delete_files:
      io.delete_file(f);  
  
  sink = resampled.as_real();
      
  return sink;
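A hedged usage sketch of the round trip described in the Notes above, assuming both functions live in ClearMap.Alignment.Resampling; the resolution values are illustrative only:

import numpy as np
import ClearMap.Alignment.Resampling as res  # assumed module path

data = np.random.rand(100, 120, 40)

# downsample from an assumed 1x1x2 grid to an isotropic grid of spacing 5
small = res.resample(data, sink=None,
                     source_resolution=(1.0, 1.0, 2.0),
                     sink_resolution=(5.0, 5.0, 5.0),
                     processes='serial')

# invert: pass the same arguments plus the original source as resample_source
restored = res.resample_inverse(small, sink=None, resample_source=data,
                                source_resolution=(1.0, 1.0, 2.0),
                                sink_resolution=(5.0, 5.0, 5.0),
                                processes='serial')
print(small.shape, restored.shape)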
Example no. 5
def resample(source, sink = None, orientation = None, 
             sink_shape = None, source_resolution = None, sink_resolution = None, 
             interpolation = 'linear', axes_order = None, method = 'shared',
             processes = None, verbose = True):
  """Resample data of source in new shape/resolution and orientation.
  
  Arguments
  ---------
  source : str or array
    The source to be resampled.
  sink : str or None
    The sink for the resampled image.
  orientation : tuple or None
    The orientation specified by a permutation and sign changes of (1,2,3).
  sink_shape : tuple or None
    The target shape of the resampled sink.
  source_resolution : tuple or None
    The resolution of the source (in length per pixel).
  sink_resolution : tuple or None
    The resolution of the resampled source (in length per pixel).
  interpolation : str 
    The method to use for interpolating to the resampled array.
  axes_order : str, list of tuples of int or None
    The axes pairs along which to resample the data at each step.
    If None, this is determined automatically. For a FileList source, 
    the first tuple should point to axes other than the file axis.
    If 'size', the axes order is determined automatically to maximally reduce 
    the size of the array in each resampling step.
    If 'order', the axes order is chosen automatically to optimize I/O speed.
  method : 'shared' or 'memmap'
    Method to handle intermediate resampling results. If 'shared' use shared 
    memory, otherwise use a memory map on disk.
  processes : int, None or 'serial'
    Number of processes to use for parallel resampling; if None, use the maximal 
    number of processes available, if 'serial', process serially.
  verbose : bool
    If True, display progress information.
  
  Returns
  -------
  sink : array or str
    The data or filename of resampled sink.

  Notes
  -----
  * Resolutions are assumed to be given for the axes of the intrinsic 
    orientation of the data and reference (as when viewed by ImageJ).
  * Orientation: a permutation of 1,2,3 with optional signs, indicating which 
    axes map onto the reference axes; a negative sign indicates reversal 
    of that particular axis.
  * Only a minimal set of information to determine the resampling parameter 
    has to be given, e.g. source_shape and sink_shape.
  * The resampling is done by iterating two dimensional resampling steps.
  """
  #TODO: write full nd resampling routine extending cv2 lib.
  if verbose:
    timer = tmr.Timer();
  
  source = io.as_source(source);
  source_shape = source.shape;
  ndim = len(source_shape);
  dtype = source.dtype;
  order = source.order;
  
  orientation = format_orientation(orientation);
  
  source_shape, sink_shape, source_resolution, sink_resolution = \
     resample_shape(source_shape=source_shape, sink_shape=sink_shape, 
                    source_resolution=source_resolution, sink_resolution=sink_resolution, 
                    orientation=orientation);
  
  sink_shape_in_source_orientation = orient_shape(sink_shape, orientation, inverse=True);
                                   
  interpolation = _interpolation_to_cv2(interpolation);                                   

  if not isinstance(processes, int) and processes != 'serial':
    processes = io.mp.cpu_count();
  
  #determine order of resampling
  axes_order, shape_order = _axes_order(axes_order, source, sink_shape_in_source_orientation, order=order);
  #print(axes_order, shape_order) 
  
  if len(axes_order) == 0:
    if verbose:
      print('resampling: no resampling necessary, source has same size as sink!');
    if sink != source:
      return io.write(sink, source);
    else:
      return source;
  
  #resample
  n_steps = len(axes_order);
  last_source = source;
  delete_files = [];
  for step, axes, shape in zip(range(n_steps), axes_order, shape_order):
    if step == n_steps-1 and orientation is None:
      resampled = io.initialize(source=sink, shape=sink_shape, dtype=dtype, as_source=True); 
    else:
      if method == 'shared':
        resampled = io.sma.create(shape, dtype=dtype, order=order, as_source=True);
      else:
        location = tempfile.mktemp() + '.npy';
        resampled = io.mmp.create(location, shape=shape, dtype=dtype, order=order, as_source=True);
        delete_files.append(location);
    #print(resampled)

    #indices for non-resampled axes
    indices = tuple([range(s) for d,s in enumerate(shape) if d not in axes]);
    indices = [i for i in itertools.product(*indices)];
    n_indices = len(indices);
    
    #resample step
    last_source_virtual = last_source.as_virtual();
    resampled_virtual = resampled.as_virtual();
    _resample = ft.partial(_resample_2d, source=last_source_virtual, sink=resampled_virtual, axes=axes, shape=shape, 
                                         interpolation=interpolation, n_indices=n_indices, verbose=verbose)                       
    
    if processes == 'serial': 
      for index in indices:
        _resample(index=index);
    else:
      #print(processes);
      with concurrent.futures.ProcessPoolExecutor(processes) as executor:
        executor.map(_resample, indices);
        
    last_source = resampled;
  
  #fix orientation
  if orientation is not None:
    #permute
    per = orientation_to_permuation(orientation);
    resampled = resampled.transpose(per);

    #reverse axes
    reslice = False;
    slicing = [slice(None)] * ndim;
    for d,o in enumerate(orientation):
      if o < 0:
        slicing[d] = slice(None, None, -1);
        reslice = True;
    if reslice:
      resampled = resampled[slicing];
      
    if verbose:
      print("resample: re-oriented shape %r!" % (resampled.shape,))
  
    sink = io.write(sink, resampled);
  else: 
    sink = resampled;
  
  for f in delete_files:
      io.delete_file(f);
  
  if verbose:
    timer.print_elapsed_time('Resampling')
    
  return sink;
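A worked example of the shape/resolution relation the Notes refer to; the numbers are illustrative, and the exact rounding is whatever resample_shape implements:

# Illustrative numbers; the physical extent stays fixed under resampling.
source_shape      = (2000, 2000, 1000)    # voxels
source_resolution = (1.625, 1.625, 2.5)   # length units per voxel
sink_resolution   = (25.0, 25.0, 25.0)

sink_shape = tuple(round(s * r / q) for s, r, q in
                   zip(source_shape, source_resolution, sink_resolution))
print(sink_shape)  # -> (130, 130, 100)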
Example no. 6
def postprocess(source, sink = None,
                postprocessing_parameter = default_postprocessing_parameter,
                processing_parameter = default_postprocessing_processing_parameter,
                processes = None, verbose = True):
  """Postprocess a binarized image.
  
  Arguments
  ---------
  source : source specification
    The binary source.
  sink : sink specification or None
    The sink to write the postprocessed result to. 
    If None, an array is returned.
  postprocessing_parameter : dict
    Parameter for the postprocessing.
  processing_parameter : dict
    Parameter for the parallel processing.
  processes : int or None
    Number of parallel processes to use.
  verbose : bool
    If True, print progress output.
  
  Returns
  -------
  sink : Source
    The result of the postprocessing.
    
  Notes
  -----
  * The postprocessing pipeline is composed of several steps. The parameters
    for each step are passed as sub-dictionaries to the 
    postprocessing_parameter dictionary.
  
  * If None is passed for one of the steps the step is skipped.
    
  Smoothing
  ---------
  smooth : dict or None
    Smoothing step parameter. See
    :func:`ClearMap.ImageProcessing.Binary.Smoothing.smooth_by_configuration`

    iterations : int
      Number of smoothing iterations.
      
      For the vasculature a typical value is 6.
  
  Filling
  -------
  fill : bool or None
    If True, fill holes in the binary data.
  """
  
  source = io.as_source(source);  
  sink   = ap.initialize_sink(sink, shape=source.shape, dtype=source.dtype, order=source.order, return_buffer=False);
  
  if verbose:
    timer = tmr.Timer();
    print('Binary post processing: initialized.');
  
  postprocessing_parameter = postprocessing_parameter.copy();
  parameter_smooth = postprocessing_parameter.pop('smooth', None);
  parameter_fill   = postprocessing_parameter.pop('fill', None);
  #print(parameter_smooth, parameter_fill)
  
  #smoothing
  save = None;
  if parameter_smooth:
    #initialize temporary files if needed
    if parameter_fill:
      save = parameter_smooth.pop('save', None);
      temporary_filename = save; 
      if temporary_filename is None:
        temporary_filename = postprocessing_parameter['temporary_filename'];
      if temporary_filename is None:
        temporary_filename = tmpf.mktemp(prefix='TubeMap_Vasculature_postprocessing', suffix='.npy');
      sink_smooth   = ap.initialize_sink(temporary_filename, shape=source.shape, dtype=source.dtype, order=source.order, return_buffer=False);
    else:
      sink_smooth = sink;
    
    #run smoothing
    source_fill = bs.smooth_by_configuration(source, sink=sink_smooth, processing_parameter=processing_parameter, processes=processes, verbose=verbose, **parameter_smooth);
  
  else:
    source_fill = source;
  
  if parameter_fill:
    sink = bf.fill(source_fill, sink=sink, processes=processes, verbose=verbose);
    
    if parameter_smooth and save is None:
      io.delete_file(temporary_filename);
  else:
    sink = source_fill;
  
  if verbose:
    timer.print_elapsed_time('Binary post processing');
  
  gc.collect()
  return sink;
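A sketch of the parameter dictionary this function consumes, using only the keys referenced in the docstring and code above; the shipped default_postprocessing_parameter may contain further entries:

postprocessing_parameter = {
    'smooth': {                 # None skips the smoothing step
        'iterations': 6,        # typical value for vasculature data
        'save': None,           # optional file to keep the smoothed intermediate
    },
    'fill': True,               # fill holes in the binary data; None skips
    'temporary_filename': None  # where to stage the smoothed result if both steps run
}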
Example no. 7
def convert_and_split(directory, expression_raw, step, arrayid):
    # Hypothetical wrapper added so the snippet below is runnable; the original
    # script supplies directory, expression_raw, step and arrayid itself.
    import os
    import ClearMap.IO.IO as io
    import ClearMap.IO.Workspace as wsp
    import ClearMap.ParallelProcessing.BlockProcessing as bp

    ws = wsp.Workspace('CellMap', directory=directory)
    ws.update(raw=expression_raw)
    #ws.debug = 'medchunk'
    print(ws.info())

    # convert raw to stitched npy file
    source = ws.source('raw')
    sink = ws.filename('stitched')

    if not os.path.exists(os.path.join(directory, "final_blocks")):
        os.mkdir(os.path.join(directory, "final_blocks"))

    if step == 0:
        print("++++++++++ STEP 0 +++++++++++++")
        # convert single z planes to stitched
        io.delete_file(sink)
        io.convert(source, sink, processes=None, verbose=True)

    elif step == 1:
        # Split into blocks
        print("splitting into blocks")
        blocks = bp.split_into_blocks(
            ws.source('stitched'),
            processes=12,
            axes=[2],  # chunks along z
            size_min=5,
            size_max=20,
            overlap=2,
            verbose=True)
        print("Done splitting into blocks of len {} for array {}".format(
            len(blocks), arrayid))
Example no. 8
def index_from_binary(source,
                      sink=None,
                      method='shared',
                      dtype='uint32',
                      processes=None,
                      verbose=False):
    """Calculate the local 3x3x3 configuration in a binary source.
  
  Note
  ----
  The configuration kernel is separable and convolution with it 
  is calculated via a sequence of 1d convolutions.
  """
    processes, timer = ap.initialize_processing(processes=processes,
                                                verbose=verbose,
                                                function='index_from_binary')

    #determine configuration
    source, source_buffer, source_shape, source_strides, source_order = ap.initialize_source(
        source,
        as_1d=True,
        return_shape=True,
        return_strides=True,
        return_order=True)
    ndim = len(source_shape)

    buffer_dtype = np.result_type(source_buffer.dtype, 'uint32')

    delete_files = []
    if source_order == 'C':
        axis_range = range(ndim - 1, -1, -1)
        axis_last = 0
    else:
        axis_range = range(ndim)
        axis_last = ndim - 1
    for axis in axis_range:
        if axis == axis_last:
            sink, sink_buffer, sink_shape, sink_strides = ap.initialize_sink(
                sink=sink,
                as_1d=True,
                source=source,
                dtype=dtype,
                return_shape=True,
                return_strides=True)
        else:
            if method == 'shared':
                _, sink_buffer, sink_shape, sink_strides = ap.initialize_sink(
                    sink=None,
                    as_1d=True,
                    shape=source_shape,
                    dtype=buffer_dtype,
                    order=source_order,
                    return_shape=True,
                    return_strides=True)
            else:
                location = tempfile.mktemp() + '.npy'
                _, sink_buffer, sink_shape, sink_strides = ap.initialize_sink(
                    sink=location,
                    as_1d=True,
                    shape=tuple(source_shape),
                    dtype=buffer_dtype,
                    order=source_order,
                    return_shape=True,
                    return_strides=True)
                delete_files.append(location)

        kernel = index_kernel(axis=axis, dtype=float)

        #print(source_buffer.dtype, source_buffer.shape, source_shape, source_strides, axis, sink_buffer.shape, sink_buffer.dtype, sink_strides, kernel.dtype)
        ap.code.correlate_1d(source_buffer, source_shape, source_strides,
                             sink_buffer, sink_shape, sink_strides, kernel,
                             axis, processes)
        source_buffer = sink_buffer

    for f in delete_files:
        io.delete_file(f)

    ap.finalize_processing(verbose=verbose,
                           function='index_from_binary',
                           timer=timer)

    return sink
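A pure NumPy/SciPy illustration of the configuration index the docstring describes: every voxel of a binary volume gets a 27-bit integer encoding its 3x3x3 neighbourhood, and because the power-of-two weights factor as 2**(i + 3*j + 9*k), the 3D correlation separates into three 1D correlations. The bit ordering here is an assumption and need not match ClearMap's kernel exactly:

import numpy as np
from scipy import ndimage as ndi

binary = np.random.rand(10, 11, 12) > 0.5

# 1d weights whose outer product is 2**(i + 3*j + 9*k) over the 3x3x3 cube
w0 = np.array([1, 2, 4])         # 2**i
w1 = np.array([1, 8, 64])        # 2**(3*j)
w2 = np.array([1, 512, 262144])  # 2**(9*k)

index = binary.astype('uint32')
for axis, w in enumerate((w0, w1, w2)):
    index = ndi.correlate1d(index, w, axis=axis, mode='constant')

# index[x, y, z] now encodes which voxels of the 3x3x3 neighbourhood
# around binary[x, y, z] are set, as an integer in [0, 2**27).
print(index.max())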