def rotations_faces(cube):
    U = cube.copy()
    N = t3d.rotate(cube, axis=0, steps=1)
    W = t3d.rotate(cube, axis=1, steps=1)
    UNW = [U, N, W]
    DSE = [t3d.reflect(X) for X in UNW]
    return UNW + DSE
def rotations_nodes(cube):
    UNW = cube.copy()
    UNE = t3d.rotate(cube, axis=2, steps=1)
    USE = t3d.rotate(cube, axis=2, steps=2)
    USW = t3d.rotate(cube, axis=2, steps=3)
    R = [t3d.reflect(X) for X in [UNW, UNE, USE, USW]]
    return [UNW, UNE, USE, USW] + R
def match_non_removable(index, verbose=True):
    if verbose and index % 2**14 == 0:
        print('PK12 LUT non-removables: %d / %d' % (index, 2**26))
    cube = t3d.cube_from_index(index=index, center=False)
    n = cube.sum()
    if n < 2:
        return True
    if n > 3:
        return False
    x, y, z = np.where(cube)
    if n == 2:
        # two voxels are non-removable if they are separated by the full
        # width of the 3x3x3 neighbourhood along some axis
        return np.any(np.abs([x[1] - x[0], y[1] - y[0], z[1] - z[0]]) == 2)
    else:
        # three voxels are non-removable if all pairs are separated that way
        return (np.any(np.abs([x[1] - x[0], y[1] - y[0], z[1] - z[0]]) == 2) and
                np.any(np.abs([x[2] - x[0], y[2] - y[0], z[2] - z[0]]) == 2) and
                np.any(np.abs([x[1] - x[2], y[1] - y[2], z[1] - z[2]]) == 2))
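# Illustration of the criterion above (a sketch, assuming t3d.cube_to_index is
# the inverse of t3d.cube_from_index for centre-less cubes, as in the
# Topology3d tests): two foreground voxels on opposite faces of the 3x3x3
# neighbourhood are not 26-connected, so the configuration is non-removable.
def _example_match_non_removable():
    cube = np.zeros((3, 3, 3), dtype=bool)
    cube[0, 1, 1] = True
    cube[2, 1, 1] = True  # axis distance 2 from the first voxel
    index = t3d.cube_to_index(cube)
    assert match_non_removable(index, verbose=False)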
def rotations_edges(cube):
    UN = cube.copy()
    UE = t3d.rotate(cube, axis=2, steps=1)
    US = t3d.rotate(cube, axis=2, steps=2)
    UW = t3d.rotate(cube, axis=2, steps=3)
    NW = t3d.rotate(cube, axis=1, steps=3)
    NE = t3d.rotate(cube, axis=1, steps=1)
    R = [t3d.reflect(X) for X in [UN, UE, US, UW, NW, NE]]
    return [UN, UE, US, UW, NW, NE] + R
def rotations_node_faces(cube):
    U_UNW = cube.copy()                        # U1
    U_UNE = t3d.rotate(cube, axis=2, steps=1)  # U3
    U_USE = t3d.rotate(cube, axis=2, steps=2)  # U5
    U_USW = t3d.rotate(cube, axis=2, steps=3)  # U7
    Us = [U_UNW, U_UNE, U_USE, U_USW]
    Ns = [t3d.rotate(X, axis=0, steps=1) for X in Us]  # N7, N1, N3, N5
    Ws = [t3d.rotate(X, axis=1, steps=1) for X in Us]  # W3, W1, W7, W5
    UNWs = Us + Ns + Ws
    DSEs = [t3d.reflect(X) for X in UNWs]
    return UNWs + DSEs
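# Quick consistency check on the symmetry orbits enumerated by the helpers
# above: a cube has 6 faces, 12 edges, 8 nodes and 24 node-adjacent faces
# (a sketch, assuming t3d.cube_labeled as used in the Topology3d tests):
def _example_rotation_orbits():
    cube = t3d.cube_labeled()
    assert len(rotations_faces(cube)) == 6
    assert len(rotations_edges(cube)) == 12
    assert len(rotations_nodes(cube)) == 8
    assert len(rotations_node_faces(cube)) == 24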
def smooth_by_configuration_block(source, iterations=1, verbose=False):
    """Smooth a binary source using the local configuration around each pixel.

    Arguments
    ---------
    source : array
        The binary source to smooth.
    iterations : int
        Number of smoothing iterations.
    verbose : bool
        If True, print progress information.

    Returns
    -------
    smoothed : array
        The smoothed binary array.
    """
    if isinstance(source, io.src.Source):
        smoothed = source.array
    else:
        smoothed = source
    smoothed = np.asarray(smoothed, dtype='uint32')
    ndim = smoothed.ndim

    lut = np.asarray(initialize_lookup_table(verbose=verbose), dtype='uint32')

    for i in range(iterations):
        # compute the configuration index via separable correlation, then
        # map each index to its smoothing action via the lookup table
        for axis in range(ndim):
            kernel = t3d.index_kernel(axis=axis)
            smoothed = ndi.correlate1d(smoothed, kernel, axis=axis,
                                       output='uint32', mode='constant', cval=0)
        smoothed = lut[smoothed]
        if verbose:
            print('Binary Smoothing: iteration %d / %d done!' % (i + 1, iterations))

    return np.asarray(smoothed, dtype=bool)
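# Usage sketch (the array shape and threshold are illustrative; the input can
# be a 3d boolean array or a ClearMap source):
def _example_smooth_by_configuration_block():
    binary = np.random.rand(50, 50, 50) > 0.5
    smoothed = smooth_by_configuration_block(binary, iterations=3, verbose=True)
    return smoothed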
def index_to_smoothing(index, verbose=True):
    """Match the index of a configuration to its smoothing action."""
    if verbose and index % 2**14 == 0:
        print('Smoothing LUT: %d / %d' % (index, 2**27))
    cube = t3d.cube_from_index(index=index, center=None)
    return cube_to_smoothing(cube)
def _test():
    import numpy as np
    from importlib import reload
    import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap

    ## Lookup table processing

    # apply_lut
    x = np.random.randint(0, 100, size=(20, 30))
    lut = np.arange(100) + 1
    y = ap.apply_lut(x, lut)
    assert np.all(y == x + 1)

    # apply_lut_to_index
    import ClearMap.ImageProcessing.Topology.Topology3d as t3d
    kernel = t3d.index_kernel(dtype=int)

    import ClearMap.ImageProcessing.Binary.Smoothing as sm
    lut = sm.initialize_lookup_table()

    data = np.array(np.random.rand(150, 30, 40) > 0.75, order='F')
    result = ap.apply_lut_to_index(data, kernel, lut, sink=None, verbose=True)

    import ClearMap.Visualization.Plot3d as p3d
    p3d.plot([[data, result]])

    ## Correlation

    # correlate1d
    kernel = np.array(range(11), dtype='uint32')
    data = np.array(np.random.randint(0, 2**27, (300, 400, 1500), dtype='uint32'), order='F')
    #data = np.array(np.random.rand(3, 4, 5), order='F')
    data = np.empty((300, 400, 1500), order='F')
    kernel = np.array([1, 2, 3, 4, 5], dtype='uint8')
    sink = 'test.npy'

    import ClearMap.Utils.Timer as tmr
    import scipy.ndimage as ndi
    timer = tmr.Timer()
    for axis in range(3):
        print(axis)
        corr_ndi = ndi.correlate1d(data, kernel, axis=axis, mode='constant', cval=0)
    timer.print_elapsed_time('ndi')

    timer = tmr.Timer()
    for axis in range(3):
        print(axis)
        corr = ap.correlate1d(data, sink=sink, kernel=kernel, axis=axis,
                              verbose=False, processes=None)
    timer.print_elapsed_time('ap')

    assert np.allclose(corr.array, corr_ndi)

    # IO
    reload(ap)
    data = np.random.rand(10, 200, 10)
    sink = ap.write('test.npy', data, verbose=True)
    assert np.all(sink.array == data)

    read = ap.read('test.npy', verbose=True)
    assert np.all(read.array == data)

    ap.io.delete_file('test.npy')

    # where
    reload(ap)
    data = np.random.rand(30, 20, 40) > 0.5

    where_np = np.array(np.where(data)).T
    where = ap.where(data, cutoff=2**0)

    check_np = np.zeros(data.shape, dtype=bool)
    check = np.zeros(data.shape, dtype=bool)
    check_np[tuple(where_np.T)] = True
    check[tuple(where.array.T)] = True
    assert np.all(check_np == check)
def graph_from_skeleton(skeleton, points=None, radii=None,
                        vertex_coordinates=True,
                        check_border=True, delete_border=False,
                        verbose=False):
    """Converts a binary skeleton image to a graph-tool graph.

    Arguments
    ---------
    skeleton : array
        Source with 2d/3d binary skeleton.
    points : array
        List of skeleton points as 1d indices of the flat skeleton array
        (optional, to save processing time).
    radii : array
        List of radii associated with each vertex.
    vertex_coordinates : bool
        If True, store coordinates of the vertices / edges.
    check_border : bool
        If True, check that the border is empty. The algorithm requires this.
    delete_border : bool
        If True, delete the border.
    verbose : bool
        If True, print progress information.

    Returns
    -------
    graph : Graph class
        The graph corresponding to the skeleton.

    Note
    ----
    Edges are detected between neighbouring foreground pixels using
    26-connectivity.
    """
    skeleton = io.as_source(skeleton)

    if delete_border:
        skeleton = t3d.delete_border(skeleton)
        check_border = False

    if check_border:
        if not t3d.check_border(skeleton):
            raise ValueError('The skeleton array needs to have no points on the border!')

    if verbose:
        timer = tmr.Timer()
        timer_all = tmr.Timer()
        print('Graph from skeleton calculation initialized!')

    if points is None:
        points = ap.where(skeleton.reshape(-1, order='A')).array
    if verbose:
        timer.print_elapsed_time('Point list generation')
        timer.reset()

    # create graph
    n_vertices = points.shape[0]
    g = ggt.Graph(n_vertices=n_vertices, directed=False)
    g.shape = skeleton.shape
    if verbose:
        timer.print_elapsed_time('Graph initialized with %d vertices' % n_vertices)
        timer.reset()

    # detect edges
    edges_all = np.zeros((0, 2), dtype=int)
    for i, o in enumerate(t3d.orientations()):
        # calculate the offset of this orientation in the flat array
        offset = np.sum((np.hstack(np.where(o)) - [1, 1, 1]) * skeleton.strides)
        edges = ap.neighbours(points, offset)
        if len(edges) > 0:
            edges_all = np.vstack([edges_all, edges])
        if verbose:
            timer.print_elapsed_time('%d edges with orientation %d/13 found' % (edges.shape[0], i + 1))
            timer.reset()

    if edges_all.shape[0] > 0:
        g.add_edge(edges_all)
    if verbose:
        timer.print_elapsed_time('Added %d edges to graph' % (edges_all.shape[0]))
        timer.reset()

    if vertex_coordinates:
        vertex_coordinates = np.array(np.unravel_index(points, skeleton.shape, order=skeleton.order)).T
        g.set_vertex_coordinates(vertex_coordinates)

    if radii is not None:
        g.set_vertex_radius(radii)

    if verbose:
        timer_all.print_elapsed_time('Skeleton to Graph')

    return g
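# Usage sketch (the straight line stands in for a real skeleton; shape is
# illustrative): the line becomes a chain of vertices connected by edges.
def _example_graph_from_skeleton():
    binary = np.zeros((50, 50, 50), dtype=bool)
    binary[10:40, 25, 25] = True  # a straight line as a trivial skeleton
    graph = graph_from_skeleton(binary, delete_border=True, verbose=True)
    return graph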
def skeletonize_index(binary, points=None, steps=None, removals=False,
                      radii=False, return_points=False,
                      check_border=True, delete_border=False, verbose=True):
    """Skeletonize a binary 3d array using the PK12 algorithm via index coordinates.

    Arguments
    ---------
    binary : array
        Binary image to be skeletonized.
    steps : int or None
        Number of maximal iteration steps. If None, use maximal reduction.
    removals : bool
        If True, return the steps at which the pixels in the input data
        were removed.
    radii : bool
        If True, the estimate of the local radius is returned.
    verbose : bool
        If True, print progress info.

    Returns
    -------
    skeleton : array
        The skeleton of the binary input.
    points : nxd array
        The point coordinates of the skeleton.
    """
    if verbose:
        print('#############################################################')
        print('Skeletonization PK12 [convolution, index]')
        timer = tmr.Timer()

    # TODO: make this work for any memmapable source
    if not isinstance(binary, np.ndarray):
        raise ValueError('Numpy array required for binary in skeletonization!')
    if binary.ndim != 3:
        raise ValueError('The binary array dimension is %d, 3 is required!' % binary.ndim)

    if delete_border:
        binary = t3d.delete_border(binary)
        check_border = False

    if check_border:
        if not t3d.check_border(binary):
            raise ValueError('The binary array needs to have no points on the border!')

    binary_flat = binary.reshape(-1, order='A')

    # detect points
    if points is None:
        points = ap.where(binary_flat).array
    npoints = points.shape[0]

    if verbose:
        timer.print_elapsed_time('Foreground points: %d' % (points.shape[0],))

    if removals is True or radii is True:
        #birth = np.zeros(binary.shape, dtype='uint16')
        order = 'C'
        if binary.flags.f_contiguous:
            order = 'F'
        death = np.zeros(binary.shape, dtype='uint16', order=order)
        deathflat = death.reshape(-1, order='A')
        with_info = True
    else:
        with_info = False

    # iterate
    if steps is None:
        steps = -1

    step = 1
    nnonrem = 0
    while True:
        if verbose:
            print('#############################################################')
            print('Iteration %d' % step)
            timer_iter = tmr.Timer()

        #print(type(points), points.dtype, binary.dtype)  # debug output

        border = cpl.convolve_3d_indices_if_smaller_than(binary, t3d.n6, points, 6)
        borderpoints = points[border]
        #borderids = np.nonzero(border)[0]
        borderids = ap.where(border).array
        keep = np.ones(len(border), dtype=bool)

        if verbose:
            timer_iter.print_elapsed_time('Border points: %d' % (len(borderpoints),))

        #if info is not None:
        #  b = birth[borderpoints[:,0], borderpoints[:,1], borderpoints[:,2]]
        #  bids = b == 0
        #  birth[borderpoints[bids,0], borderpoints[bids,1], borderpoints[bids,2]] = step

        # sub-iterations
        remiter = 0
        for i in range(12):
            if verbose:
                print('-------------------------------------------------------------')
                print('Sub-Iteration %d' % i)
                timer_sub_iter = tmr.Timer()

            remborder = delete[cpl.convolve_3d_indices(binary, rotations[i], borderpoints)]
            rempoints = borderpoints[remborder]
            if verbose:
                timer_sub_iter.print_elapsed_time('Matched points: %d' % (len(rempoints),))

            binary_flat[rempoints] = 0
            keep[borderids[remborder]] = False
            rem = len(rempoints)
            remiter += rem

            # death times
            if with_info is True:
                #remo = np.logical_not(keep)
                deathflat[rempoints] = 12 * step + i

            if verbose:
                timer_sub_iter.print_elapsed_time('Sub-Iteration %d' % (i,))

        if verbose:
            print('-------------------------------------------------------------')

        # update foreground
        points = points[keep]
        if step % 3 == 0:
            npts = len(points)
            points = points[consider[cpl.convolve_3d_indices(binary, base, points)]]
            nnonrem += npts - len(points)
            if verbose:
                print('Non-removable points: %d' % (npts - len(points)))

        if verbose:
            print('Foreground points: %d' % points.shape[0])

        if verbose:
            print('-------------------------------------------------------------')
            timer_iter.print_elapsed_time('Iteration %d' % (step,))

        step += 1
        if steps >= 0 and step >= steps:
            break
        if remiter == 0:
            break

    if verbose:
        print('#############################################################')
        timer.print_elapsed_time('Skeletonization done')
        print('Total removed: %d' % (npoints - (len(points) + nnonrem)))
        print('Total remaining: %d' % (len(points) + nnonrem))

    if radii is True or return_points is True:
        points = ap.where(binary_flat).array

    if radii is True:
        # estimate the local radius as the average death time of the neighbourhood
        radii = cpl.convolve_3d_indices(death, t3d.n18, points, out_dtype='uint16')
    else:
        radii = None

    result = [binary]
    if return_points:
        result.append(points)
    if removals is True:
        result.append(death)
    if radii is not None:
        result.append(radii)
    if len(result) > 1:
        return tuple(result)
    else:
        return result[0]
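# Usage sketch (shape and threshold are illustrative; the binary is modified
# in place, hence the copy):
def _example_skeletonize_index():
    binary = np.random.rand(100, 100, 100) > 0.4
    skeleton = skeletonize_index(binary.copy(), delete_border=True, verbose=True)
    return skeleton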
def skeletonize(binary, points=None, steps=None, removals=False, radii=False,
                check_border=True, delete_border=False,
                return_points=False, verbose=True):
    """Skeletonize a binary 3d array using the PK12 algorithm.

    Arguments
    ---------
    binary : array
        Binary image to skeletonize.
    points : array or None
        Optional list of points in the binary to speed up processing.
    steps : int or None
        Number of maximal iteration steps (if None, maximal reduction).
    removals : bool
        If True, also return the steps at which the pixels in the input data
        were removed.
    radii : bool
        If True, the estimate of the local radius is returned.
    check_border : bool
        If True, check that the border is empty. The algorithm requires this.
    delete_border : bool
        If True, delete the border.
    verbose : bool
        If True, print progress info.

    Returns
    -------
    skeleton : array
        The skeleton of the binary.
    points : array
        The nx3 point coordinates of the skeleton.

    Note
    ----
    The skeletonization is done in place on the binary. Copy the binary if
    it is needed for further processing.
    """
    if verbose:
        print('#############################################################')
        print('Skeletonization PK12 [convolution]')
        timer = tmr.Timer()

    # TODO: make this work for any memmapable source!
    if not isinstance(binary, np.ndarray):
        raise ValueError('Numpy array required for binary in skeletonization!')
    if binary.ndim != 3:
        raise ValueError('The binary array dimension is %d, 3 is required!' % binary.ndim)

    if delete_border:
        binary = t3d.delete_border(binary)
        check_border = False

    if check_border:
        if not t3d.check_border(binary):
            raise ValueError('The binary array needs to have no points on the border!')

    # detect points
    #points = np.array(np.nonzero(binary)).T
    if points is None:
        points = ap.where(binary).array

    if verbose:
        timer.print_elapsed_time(head='Foreground points: %d' % (points.shape[0],))

    if removals is True or radii is True:
        #birth = np.zeros(binary.shape, dtype='uint16')
        death = np.zeros(binary.shape, dtype='uint16')
        with_info = True
    else:
        with_info = False

    # iterate
    if steps is None:
        steps = -1

    step = 1
    removed = 0
    while True:
        if verbose:
            print('#############################################################')
            print('Iteration %d' % step)
            timer_iter = tmr.Timer()

        border = cpl.convolve_3d_points(binary, t3d.n6, points) < 6
        borderpoints = points[border]
        borderids = np.nonzero(border)[0]
        keep = np.ones(len(border), dtype=bool)

        if verbose:
            timer_iter.print_elapsed_time('Border points: %d' % (len(borderpoints),))

        #if info is not None:
        #  b = birth[borderpoints[:,0], borderpoints[:,1], borderpoints[:,2]]
        #  bids = b == 0
        #  birth[borderpoints[bids,0], borderpoints[bids,1], borderpoints[bids,2]] = step

        # sub-iterations
        remiter = 0
        for i in range(12):
            if verbose:
                print('-------------------------------------------------------------')
                print('Sub-Iteration %d' % i)
                timer_sub_iter = tmr.Timer()

            remborder = delete[cpl.convolve_3d_points(binary, rotations[i], borderpoints)]
            rempoints = borderpoints[remborder]
            if verbose:
                timer_sub_iter.print_elapsed_time('Matched points: %d' % (len(rempoints),))

            binary[rempoints[:, 0], rempoints[:, 1], rempoints[:, 2]] = 0
            keep[borderids[remborder]] = False
            rem = len(rempoints)
            remiter += rem
            removed += rem
            if verbose:
                print('Deleted points: %d' % (rem,))
                timer_sub_iter.print_elapsed_time('Sub-Iteration %d' % (i,))

            # death times
            if with_info is True:
                #remo = np.logical_not(keep)
                death[rempoints[:, 0], rempoints[:, 1], rempoints[:, 2]] = 12 * step + i

        # update foreground
        points = points[keep]
        if verbose:
            print('Foreground points: %d' % points.shape[0])

        if verbose:
            print('-------------------------------------------------------------')
            timer_iter.print_elapsed_time('Iteration %d' % (step,))

        step += 1
        if steps >= 0 and step >= steps:
            break
        if remiter == 0:
            break

    if verbose:
        print('#############################################################')
        print('Total removed: %d' % (removed,))
        print('Total remaining: %d' % (len(points),))
        timer.print_elapsed_time('Skeletonization')

    result = [binary]
    if return_points:
        result.append(points)
    if removals is True:
        result.append(death)
    if radii is True:
        # estimate the local radius as the average death time of the neighbourhood
        radii = cpl.convolve_3d(death, np.array(t3d.n18, dtype='uint16'), points)
        result.append(radii)
    if len(result) > 1:
        return tuple(result)
    else:
        return result[0]
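# Usage sketch (shape and threshold are illustrative; as noted in the
# docstring, the binary is modified in place, hence the copy):
def _example_skeletonize():
    binary = np.random.rand(100, 100, 100) > 0.4
    skeleton, points = skeletonize(binary.copy(), delete_border=True,
                                   return_points=True, verbose=True)
    return skeleton, points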
def match_index(index, verbose=True):
    if verbose and index % 2**14 == 0:
        print('PK12 LUT: %d / %d' % (index, 2**26))
    cube = t3d.cube_from_index(index=index, center=True)
    return match(cube)
    filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)

    # check if only a compressed version of the file exists
    fu.uncompress(filename)

    if os.path.exists(filename):
        return np.load(filename)
    else:
        lut = generate_lookup_table(function=function)
        np.save(filename, lut)
        return lut


base = t3d.cube_base_2(center=False)
"""Base kernel to multiply with a cube to obtain the index of the cube."""

delete = initialize_lookup_table()
"""Lookup table mapping a cube index to its deletability."""

keep = np.logical_not(delete)
"""Lookup table mapping a cube index to its non-deletability."""

filename_non_removable = "PK12nr.npy"
"""Filename for the lookup table mapping a cube configuration to the
non-removability of the center pixel."""

non_removable = initialize_lookup_table(filename=filename_non_removable,
                                        function=match_non_removable)
"""Lookup table mapping a cube index to its non-removability."""
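# Sketch of how the index machinery fits together: each of the 26 neighbours
# of a voxel carries a power-of-two weight in `base`, so multiplying a cube
# configuration with `base` and summing yields the index used to look up
# `delete` / `non_removable` (assumes t3d.cube_to_index as used in the
# Topology3d tests; the index 11607 is the one exercised there):
def _example_cube_index():
    cube = t3d.cube_from_index(index=11607, center=False)
    assert np.sum(cube * base) == 11607
    assert t3d.cube_to_index(cube) == 11607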
def _test():
    import numpy as np
    from importlib import reload
    import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap
    reload(ap)

    ## Lookup table processing

    # apply_lut
    x = np.random.randint(0, 100, size=(20, 30))
    lut = np.arange(100) + 1
    y = ap.apply_lut(x, lut)
    assert np.all(y == x + 1)

    # apply_lut_to_index
    import ClearMap.ImageProcessing.Topology.Topology3d as t3d
    kernel = t3d.index_kernel(dtype=int)

    import ClearMap.ImageProcessing.Binary.Smoothing as sm
    lut = sm.initialize_lookup_table()

    data = np.random.randint(0, 2, (1500, 300, 400), dtype=bool)
    #reload(ap)
    result = ap.apply_lut_to_index(data, kernel, lut, sink=None, verbose=True)

    import ClearMap.Visualization.Plot3d as p3d
    p3d.plot([data, result])

    ## Correlation

    # correlate1d
    #reload(ap)
    axis = 1
    kernel = np.array(range(11), dtype='uint32')
    data = np.random.randint(0, 2**27, (1000, 1500, 100), dtype='uint32')

    corr = ap.correlate1d(data, kernel, axis=axis, verbose=True, processes=10)

    import scipy.ndimage as ndi
    import ClearMap.Utils.Timer as tmr
    timer = tmr.Timer()
    corr_ndi = ndi.correlate1d(data, kernel, axis=axis, mode='constant', cval=0)
    timer.print_elapsed_time('ndi')

    assert np.allclose(corr, corr_ndi)
# """ # if data.ndim != 1 and data.ndim != 3: # raise Warning('Using numpy where for dimension %d and type %s!' % (data.ndim, data.dtype)) # return np.vstack(np.where(data)).T; # # if cutoff is None: # cutoff = 1; # cutoff = min(1, cutoff); # if data.size <= cutoff: # return np.vstack(np.where(data)).T; # # if processes is None: # processes = defaultProcesses; # if blocks is None: # blocks = processes * defaultBlocksPerProcess; # # if data.dtype == bool: # d = data.view('uint8') # else: # d = data; # # if out is None: # if d.ndim == 1: # sums = code.blockSums1d(d, blocks = blocks, processes = processes); # else: # sums = code.blockSums3d(d, blocks = blocks, processes = processes); # out = np.squeeze(np.zeros((np.sum(sums), data.ndim), dtype = np.int)); # else: # sums = None; # # if d.ndim == 1: # code.where1d(d, out = out, sums = sums, blocks = blocks, processes = processes); # else: # d.ndim == 3: # code.where3d(d, out = out, sums = sums, blocks = blocks, processes = processes); # # return out; # # # # #def setValue(data, indices, value, cutoff = defaultCutoff, processes = defaultProcesses): # """Set value at specified indices of an array # # Arguments: # data : array # array to search for nonzero indices # indices : array or None # list of indices to set # value : numeric or bool # value to set elements in data to # processes : None or int # number of processes, if None use number of cpus # # Returns: # array # array with specified entries set to new value # # Note: # Uses numpy if there is no match of dimension implemented! # """ # if data.ndim != 1: # raise Warning('Using numpy where for dimension %d and type %s!' % (data.ndim, data.dtype)) # data[indices] = value; # return data; # # if cutoff is None: # cutoff = 1; # cutoff = min(1, cutoff); # if data.size <= cutoff: # data[indices] = value; # return data; # # if processes is None: # processes = defaultProcesses; # # if data.dtype == bool: # d = data.view('uint8') # else: # d = data; # # code.set1d(d, indices, value, processes = processes); # # return data; # # #def setArray(data, indices, values, cutoff = defaultCutoff, processes = defaultProcesses): # """Set value at specified indices of an array # # Arguments: # data : array # array to search for nonzero indices # indices : array or None # list of indices to set # values : array # values to set elements in data to # processes : None or int # number of processes, if None use number of cpus # # Returns: # array # array with specified entries set to new value # # Note: # Uses numpy if there is no match of dimension implemented! # """ # if data.ndim != 1: # raise Warning('Using numpy where for dimension %d and type %s!' 
% (data.ndim, data.dtype)) # data[indices] = values; # return data; # # if cutoff is None: # cutoff = 1; # cutoff = min(1, cutoff); # if data.size <= cutoff: # data[indices] = values; # return data; # # if processes is None: # processes = defaultProcesses; # # if data.dtype == bool: # d = data.view('uint8') # else: # d = data; # # code.set1darray(d, indices, values, processes = processes); # # return data; # # # #def take(data, indices, out = None, cutoff = defaultCutoff, processes = defaultProcesses): # """Extracts the values at specified indices # # Arguments: # data : array # array to search for nonzero indices # out : array or None # if not None results is written into this array # cutoff : int # number of elements below whih to switch to numpy.where # processes : None or int # number of processes, if None use number of cpus # # Returns: # array # positions of the nonzero entries of the input array # # Note: # Uses numpy data[indices] if there is no match of dimension implemented! # """ # if data.ndim != 1: # raise Warning('Using numpy where for dimension %d and type %s!' % (data.ndim, data.dtype)) # return data[indices]; # # if cutoff is None: # cutoff = 1; # cutoff = min(1, cutoff); # if data.size < cutoff: # return data[indices]; # # if processes is None: # processes = defaultProcesses; # # if data.dtype == bool: # d = data.view('uint8') # else: # d = data; # # if out is None: # out = np.empty(len(indices), dtype = data.dtype); # if out.dtype == bool: # o = out.view('uint8'); # else: # o = out; # # code.take1d(d, indices, o, processes = processes); # # return out; # # #def match(match, indices, out = None): # """Matches a sorted list of 1d indices to another larger one # # Arguments: # match : array # array of indices to match to indices # indices : array or None # array of indices # # Returns: # array # array with specified entries set to new value # # Note: # Uses numpy if there is no match of dimension implemented! # """ # if match.ndim != 1: # raise ValueError('Match array dimension required to be 1d, found %d!' % (match.ndim)) # if indices.ndim != 1: # raise ValueError('Indices array dimension required to be 1d, found %d!' 
% (indices.ndim)) # # if out is None: # out = np.empty(len(match), dtype = match.dtype); # # code.match1d(match, indices, out); # # return out; # # # Find neighbours in an index list # # #def neighbours(indices, offset, processes = defaultProcesses): # """Returns all pairs of indices that are apart a specified offset""" # return code.neighbours(indices, offset = offset, processes = processes); # # #def findNeighbours(indices, center, shape, strides, mask): # """Finds all indices within a specified kernel region centered at a point""" # # if len(strides) != 3 or len(shape) != 3 or (strides[0] != 1 and strides[2] != 1): # raise RuntimeError('only 3d C or F contiguous arrays suported'); # # if isinstance(mask, int): # mask = (mask,); # if isinstance(mask, tuple): # mask = mask * 3; # return code.neighbourlistRadius(indices, center, shape[0], shape[1], shape[2], # strides[0], strides[1], strides[2], # mask[0], mask[1], mask[2]); # else: # if mask.dtype == bool: # mask = mask.view(dtype = 'uint8'); # # return code.neighbourlistMask(indices, center, shape[0], shape[1], shape[2], strides[0], strides[1], strides[2], mask); # # Loading and saving # #def readNumpyHeader(filename): # """Read numpy array information including offset to data # # Arguments: # filename : str # file name of the numpy file # # Returns: # shape : tuple # shape of the array # dtype : dtype # data type of array # order : str # 'C' for c and 'F' for fortran order # offset : int # offset in bytes to data buffer in file # """ # with open(filename, 'rb') as fhandle: # major, minor = np.lib.format.read_magic(fhandle); # shape, fortran, dtype = np.lib.format.read_array_header_1_0(fhandle); # offset = fhandle.tell() # # order = 'C'; # if fortran: # order = 'F'; # # return (shape, dtype, order, offset) # # #def _offsetFromSlice(sourceSlice, order = 'F'): # """Checks if slice is compatible with the large data loader and returns z coordiante""" # # if order == 'C': # os = 1; oe = 3; oi = 0; # else: # os = 0; oe = 2; oi = 2; # # for s in sourceSlice[os:oe]: # if s.start is not None or s.stop is not None or s.step is not None: # raise RuntimeError('sub-regions other than in slowest dimension %d not supported! 
slice = %r' % (oi, sourceSlice)) # # s = sourceSlice[oi]; # if s.step is not None: # raise RuntimeError('sub-regions with non unity steps not supported') # # if s.start is None: # s = 0; # else: # s = s.start; # # return s; # # #def load(filename, region = None, shared = False, blocks = None, processes = cpu_count(), verbose = False): # """Load a large npy array into memory in parallel # # Arguments: # filename : str # filename of array to load # region : Region or None # if not None this specifies the sub-region to read # shared : bool # if True read into shared memory # blocks : int or None # number of blocks to split array into for parallel processing # processes : None or int # number of processes, if None use number of cpus # verbose : bool # print info about the file to be loaded # # Returns: # array # the data as numpy array # """ # if processes is None: # processes = cpu_count(); # if blocks is None: # blocks = processes * defaultBlocksPerProcess; # # #get specs from header specs # shape, dtype, order, offset = readNumpyHeader(filename); # if verbose: # timer = tmr.Timer(); # print('Loading array of shape = %r, dtype = %r, order = %r, offset = %r' %(shape, dtype, order, offset)); # # if region is not None: # shape = region.shape(); # sourceSlice = region.sourceSlice(); # off = _offsetFromSlice(sourceSlice, order = order); # # if shared: # data = shm.create(shape, dtype = dtype, order = order); # else: # data = np.empty(shape, dtype = dtype, order = order); # # d = data.reshape(-1, order = 'A'); # if dtype == bool: # d = d.view('uint8'); # # if region is not None: # if order == 'F': # offset += data.strides[-1] * off; # else: # offset += data.strides[1] * off; # # code.load(data = d, filename = filename, offset = offset, blocks = blocks, processes = processes); # # if verbose: # timer.printElapsedTime(head = 'Loading array from %s' % filename); # # return data; # # # # #def save(filename, data, region = None, blocks = None, processes = cpu_count(), verbose = False): # """Save a large npy array to disk in parallel # # Arguments: # filename : str # filename of array to load # data : array # array to save to disk # blocks : int or None # number of blocks to split array into for parallel processing # processes : None or int # number of processes, if None use number of cpus # verbose : bool # print info about the file to be loaded # # Returns: # str # the filename of the numpy array on disk # """ # if processes is None: # processes = cpu_count(); # if blocks is None: # blocks = processes * defaultBlocksPerProcess; # # if region is None: # #create file on disk via memmap # memmap = np.lib.format.open_memmap(filename, mode = 'w+', shape = data.shape, dtype = data.dtype, fortran_order = np.isfortran(data)); # memmap.flush(); # del(memmap); # # #get specs from header specs # shape, dtype, order, offset = readNumpyHeader(filename); # if verbose: # timer = tmr.Timer(); # print('Saving array of shape = %r, dtype = %r, order = %r, offset = %r' %(shape, dtype, order, offset)); # # if (np.isfortran(data) and order != 'F') or (not np.isfortran(data) and order != 'C'): # raise RuntimeError('Order of arrays do not match isfortran=%r and order=%s' % (np.isfortran(data), order)); # # d = data.reshape(-1, order = 'A'); # if dtype == bool: # d = d.view('uint8'); # # if region is not None: # sourceSlice = region.sourceSlice(); # off = _offsetFromSlice(sourceSlice, order = order); # if order == 'F': # offset += data.strides[-1] * off; # else: # offset += data.strides[1] * off; # # #print d.dtype, filename, 
offset, blocks, processes # # code.save(data = d, filename = filename, offset = offset, blocks = blocks, processes = processes); # # if verbose: # timer.printElapsedTime(head = 'Saving array to %s' % filename); # # return filename; # # # # # # #if __name__ == "__main__": # # import numpy as np # from ClearMap.Utils.Timer import Timer; # import ClearMap.DataProcessing.LargeData as ld # reload(ld) # # # #dat = np.random.rand(2000,2000,1000) > 0.5; # #dat = np.random.rand(1000,1000,500) > 0.5; # dat = np.random.rand(200,300,400) > 0.5; # #datan = io.MMP.writeData('test.npy', dat); # # dat = np.load('data.npy') # xyz1 = np.load('points.npy') # # s = ld.sum(dat) # print(s == np.sum(s)) # # # timer = Timer(); # xyz = ld.where(dat) # timer.printElapsedTime('parallel') # #parallel: elapsed time: 0:00:25.807 # # timer = Timer(); # xyz1 = np.vstack(np.where(dat)).T # timer.printElapsedTime('numpy') # #numpy: elapsed time: 0:05:45.590 # # # d0 = np.zeros(dat.shape, dtype = bool); # d1 = np.zeros(dat.shape, dtype = bool); # # d0[xyz[:,0], xyz[:,1], xyz[:,2]] = True; # d1[xyz1[:,0], xyz1[:,1], xyz1[:,2]] = True; # np.all(d0 == d1) # # dat2 = np.array(np.random.rand(1000, 1000, 1000) > 0, dtype = 'bool'); # filename = 'test.npy'; # np.save(filename, dat2) # # filename = '/disque/raid/vasculature/4X-test2/170824_IgG_2/170824_IgG_16-23-46/rank_threshold.npy' # # timer = Timer(); # ldat = ld.load(filename, verbose = True); # timer.printElapsedTime('load') # #load: elapsed time: 0:00:04.867 # # timer = Timer(); # ldat2 = np.load(filename); # timer.printElapsedTime('numpy') # #numpy: elapsed time: 0:00:27.982 # # np.all(ldat == ldat2) # # timer = Timer(); # xyz = ld.where(ldat) # timer.printElapsedTime('parallel') # #parallel: elapsed time: 0:07:25.698 # # lldat = ldat.reshape(-1, order = 'A') # timer = Timer(); # xyz = ld.where(lldat) # timer.printElapsedTime('parallel 1d') # #parallel 1d: elapsed time: 0:00:49.034 # # timer = Timer(); # xyz = np.where(ldat) # timer.printElapsedTime('numpy') # # # import os # #os.remove(filename) # # filename = './ClearMap/Test/Skeletonization/test_bin.npy'; # timer = Timer(); # ldat = ld.load(filename, shared = True, verbose = True); # timer.printElapsedTime('load') # # ld.shm.isShared(ldat); # # # # import numpy as np # from ClearMap.Utils.Timer import Timer; # import ClearMap.DataProcessing.LargeData as ld # reload(ld) # # filename = 'test_save.npy'; # # dat = np.random.rand(100,200,100); # # ld.save(filename, dat) # # # dat2 = ld.load(filename) # # np.all(dat == dat2) # # os.remove(filename) # # # # import numpy as np # from ClearMap.Utils.Timer import Timer; # import ClearMap.DataProcessing.LargeData as ld # reload(ld) # # dat = np.zeros(100, dtype = bool); # dat2 = dat.copy(); # # indices = np.array([5,6,7,8,13,42]) # # ld.setValue(dat, indices, True, cutoff = 0); # # dat2[indices] = True; # np.all(dat2 == dat) # # d = ld.take(dat, indices, cutoff = 0) # np.all(d) # # # import numpy as np # from ClearMap.Utils.Timer import Timer; # import ClearMap.DataProcessing.LargeData as ld # reload(ld) # # # pts = np.array([0,1,5,6,10,11], dtype = int); # # ld.neighbours(pts, -10) # # # import numpy as np # from ClearMap.Utils.Timer import Timer; # import ClearMap.DataProcessing.LargeData as ld # import ClearMap.ImageProcessing.Filter.StructureElement as sel; # reload(ld) # # dat = np.random.rand(30,40,50) > 0.5; # mask = sel.structureElement('Disk', (5,5,5)); # indices = np.where(dat.reshape(-1))[0]; # c_id = len(indices)/2; # c = indices[c_id]; # xyz = np.unravel_index(c, 
dat.shape) # l = np.array(mask.shape)/2 # r = np.array(mask.shape) - l; # dlo = [max(0,xx-ll) for xx,ll in zip(xyz,l)]; # dhi = [min(xx+rr,ss) for xx,rr,ss in zip(xyz,r, dat.shape)] # mlo = [-min(0,xx-ll) for xx,ll in zip(xyz,l)]; # mhi = [mm + min(0, ss-xx-rr) for xx,rr,ss,mm in zip(xyz,r, dat.shape, mask.shape)] # # nbh = dat[dlo[0]:dhi[0], dlo[1]:dhi[1], dlo[2]:dhi[2]]; # nbhm = np.logical_and(nbh, mask[mlo[0]:mhi[0], mlo[1]:mhi[1], mlo[2]:mhi[2]] > 0); # nxyz = np.where(nbhm); # nxyz = [nn + dl for nn,dl in zip(nxyz, dlo)]; # nbi = np.ravel_multi_index(nxyz, dat.shape); # # nbs = ld.findNeighbours(indices, c_id , dat.shape, dat.strides, mask) # # nbs.sort(); # print np.all(nbs == nbi) # # # dat = np.random.rand(30,40,50) > 0.5; # indices = np.where(dat.reshape(-1))[0]; # c_id = len(indices)/2; # c = indices[c_id]; # xyz = np.unravel_index(c, dat.shape) # l = np.array([2,2,2]); # r = l + 1; # dlo = [max(0,xx-ll) for xx,ll in zip(xyz,l)]; # dhi = [min(xx+rr,ss) for xx,rr,ss in zip(xyz, r, dat.shape)] # nbh = dat[dlo[0]:dhi[0], dlo[1]:dhi[1], dlo[2]:dhi[2]]; # nxyz = np.where(nbh); # nxyz = [nn + dl for nn,dl in zip(nxyz, dlo)]; # nbi = np.ravel_multi_index(nxyz, dat.shape); # # nbs = ld.findNeighbours(indices, c_id , dat.shape, dat.strides, tuple(l)) # # nbs.sort(); # print np.all(nbs == nbi) # # print nbs # print nbi # # # import numpy as np # from ClearMap.Utils.Timer import Timer; # import ClearMap.DataProcessing.LargeData as ld # reload(ld) # # data = np.random.rand(100); # values =np.random.rand(50); # indices = np.arange(50); # ld.setArray(data, indices, values, cutoff = 1) # print np.all(data[:50] == values) # # import numpy as np # from ClearMap.Utils.Timer import Timer; # import ClearMap.DataProcessing.LargeData as ld # reload(ld) # # m = np.array([1,3,6,7,10]); # i = np.array([1,2,3,4,6,7,8,9]); # # o = ld.match(m,i) # # o2 = [np.where(i==l)[0][0] for l in m] # # #
def _test():
    import numpy as np
    import ClearMap.ImageProcessing.Topology.Topology3d as top
    from importlib import reload
    reload(top)

    label = top.cube_labeled()
    top.print_cube(label)

    # test rotations
    c = np.zeros((3, 3, 3), dtype=bool)
    c[1, 0, 0] = True
    top.print_cube(c)

    cs = [top.rotate(c, axis=2, steps=r) for r in range(4)]
    [top.print_cube(cc) for cc in cs]

    reload(top)
    l = top.cube_labeled()
    rts = top.rotations6(l)
    [top.print_cube(r) for r in rts]

    reload(top)
    b = top.cube_from_index(6)
    i = top.cube_to_index(b)
    print(i, 6)

    us = np.zeros((3, 3, 3), dtype=int)
    us[1, 1, 2] = 1
    us[1, 0, 1] = 1
    us[1, 2, 0] = 2
    r12 = top.rotations12(us)
    [top.print_cube(cc) for cc in r12]

    # check configuration utility
    reload(top)
    index = 11607
    source = top.cube_from_index(index)
    c = top.index_from_binary(source)
    assert c[1, 1, 1] == index

    x = np.random.rand(1500, 500, 500) > 0.6
    c = top.index_from_binary(x)

    # check fortran vs c order
    x = np.random.rand(5, 5, 5) > 0.35
    y = np.asanyarray(x, order='F')
    ix = top.index_from_binary(x)
    iy = top.index_from_binary(y)
    ax = ix.array
    ay = iy.array

    #%% profile
    import io
    io.DEFAULT_BUFFER_SIZE = 2**32

    import pstats, cProfile

    x = np.ones((3000, 500, 1000), dtype=bool, order='F')

    import ClearMap.IO.IO as io
    import ClearMap.ParallelProcessing.DataProcessing.ArrayProcessing as ap
    ap.write('test.npy', x)
    y = io.as_source('test.npy')
    z = io.create('result.npy', shape=y.shape, order='C', dtype='uint32')

    cProfile.runctx(
        "c = top.index_from_binary(y, method='!shared', sink=z, verbose=True, processes=None)",
        globals(), locals(), "Profile.prof")
    s = pstats.Stats("Profile.prof")
    s.strip_dirs().sort_stats("time").print_stats()

    import mmap
    mmap.ACCESS_COPY