def format_labels(labels, in_place):
    """
    Normalize a label volume to a 3D Fortran-ordered integer array.

    Required:
      labels: 1D, 2D, or 3D numpy array of labels (trailing singleton
        dimensions beyond 3 are squeezed away).
      in_place: if True, convert to Fortran order in place via fastremap
        (may modify the input); otherwise work on a Fortran-ordered copy.

    Returns: a 3D view/copy of labels in Fortran (column-major) order.

    Raises: DimensionError if labels has more than three non-trivial
      dimensions.
    """
    if in_place:
        labels = fastremap.asfortranarray(labels)
    else:
        labels = np.copy(labels, order='F')

    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # compare against the builtin `bool` dtype instead. Reinterpret
    # boolean masks as uint8 without copying.
    if labels.dtype == bool:
        labels = labels.view(np.uint8)

    original_shape = labels.shape

    # Pad missing dimensions with trailing singleton axes.
    while labels.ndim < 3:
        labels = labels[..., np.newaxis]

    # Squeeze trailing singleton axes; anything else is a genuine 4D+ input.
    while labels.ndim > 3:
        if labels.shape[-1] == 1:
            labels = labels[..., 0]
        else:
            raise DimensionError(
                "Input labels may be no more than three non-trivial dimensions. Got: {}"
                .format(original_shape)
            )

    return labels
def test_asfortranarray():
    """
    Verify fastremap.asfortranarray matches np.asfortranarray across
    dtypes, ranks (1D-4D), and square/non-square shapes.
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the correct dtype to test with.
    dtypes = list(DTYPES) + [np.float32, np.float64, bool]

    for dtype in dtypes:
        print(dtype)
        for dim in (1, 4, 7, 9, 27, 31, 100, 127, 200):
            print(dim)

            def check(shape):
                # Compare fastremap against the NumPy reference on a
                # fresh copy so neither call can affect the other.
                size = int(np.prod(shape))
                x = np.arange(size).reshape(shape).astype(dtype)
                y = np.copy(x)
                assert np.all(
                    np.asfortranarray(x) == fastremap.asfortranarray(y))

            check((dim,))
            check((dim, dim))
            check((dim, dim, dim))
            # Non-square shapes exercise non-uniform strides.
            check((dim, dim + 1))
            check((dim, dim + 1, dim))

            # 4D cases get large quickly; skip the biggest dims.
            if dim < 100:
                check((dim, dim, dim, dim))
                check((dim + 1, dim, dim, dim))
def skeletonize(
    all_labels, teasar_params=DEFAULT_TEASAR_PARAMS, anisotropy=(1, 1, 1),
    object_ids=None, dust_threshold=1000, cc_safety_factor=1,
    progress=False, fix_branching=True, in_place=False
):
    """
    Skeletonize all non-zero labels in a given 2D or 3D image.

    Required:
      all_labels: a 2D or 3D numpy array of integer type (signed or unsigned)

    Optional:
      anisotropy: the physical dimensions of each axis (e.g. 4nm x 4nm x 40nm)
      object_ids: If not none, zero out all labels other than those specified here.
      teasar_params: {
        scale: during the "rolling ball" invalidation phase, multiply the DBF
          value by this.
        const: during the "rolling ball" invalidation phase, this is the minimum
          radius in chosen physical units (i.e. nm).
        soma_detection_threshold: if object has a DBF value larger than this,
          root will be placed at largest DBF value and special one time
          invalidation will be run over that root location (see
          soma_invalidation_scale) expressed in chosen physical units (i.e. nm)
        pdrf_scale: scale factor in front of dbf, used to weight dbf over
          euclidean distance (higher to pay more attention to dbf)
          (default 5000)
        pdrf_exponent: exponent in dbf formula on distance from edge, faster if
          factor of 2 (default 16)
        soma_invalidation_scale: the 'scale' factor used in the one time soma
          root invalidation (default .5)
        soma_invalidation_const: the 'const' factor used in the one time soma
          root invalidation (default 0) (units in chosen physical units
          (i.e. nm))
      }
      dust_threshold: don't bother skeletonizing connected components smaller
        than this many voxels.
      cc_safety_factor: Value between 0 and 1 that scales the size of the
        disjoint set maps in connected_components. 1 is guaranteed to work,
        but is probably excessive and corresponds to every pixel being a
        different label. Use smaller values to save some memory.
      progress: if true, display a progress bar
      fix_branching: When enabled, zero the edge weights of previously traced
        paths. This causes branch points to occur closer to the actual path
        divergence. However, there is a performance penalty associated with
        this as dijkstra's algorithm is computed once per a path rather than
        once per a skeleton.
      in_place: if true, allow input labels to be modified to reduce memory
        usage and possibly improve performance.

    Returns: { $segid: cloudvolume.PrecomputedSkeleton, ... }
    """
    if all_labels.ndim not in (2, 3):
        raise DimensionError(
            "Can only skeletonize arrays of dimension 2 or 3.")

    # Downstream code requires Fortran (column-major) memory layout.
    if in_place:
        all_labels = fastremap.asfortranarray(all_labels)
    else:
        all_labels = np.copy(all_labels, order='F')

    # Promote 2D images to a single-slice 3D volume.
    if all_labels.ndim == 2:
        all_labels = all_labels[..., np.newaxis]

    anisotropy = np.array(anisotropy, dtype=np.float32)

    # Zero out everything except the requested object_ids (no-op if None).
    all_labels = apply_object_mask(all_labels, object_ids)
    if not np.any(all_labels):
        return {}

    # Relabel into connected components; `remapping` maps component id
    # back to the original segment id.
    cc_labels, remapping = compute_cc_labels(all_labels, cc_safety_factor)
    # Free the (potentially large) input before the next big allocation.
    del all_labels

    # Distance-to-Boundary Field (Euclidean distance transform) over the
    # whole volume, computed once and cropped per component below.
    all_dbf = edt.edt(
        cc_labels,
        anisotropy=anisotropy,
        black_border=False,
        order='F',
    )
    # slows things down, but saves memory
    # max_all_dbf = np.max(all_dbf)
    # if max_all_dbf < np.finfo(np.float16).max:
    #   all_dbf = all_dbf.astype(np.float16)

    # Keep only components larger than the dust threshold.
    cc_segids, pxct = np.unique(cc_labels, return_counts=True)
    cc_segids = [
        sid for sid, ct in zip(cc_segids, pxct) if ct > dust_threshold
    ]

    # Bounding-box slices per label (index i holds label i+1).
    all_slices = scipy.ndimage.find_objects(cc_labels)

    skeletons = defaultdict(list)
    for segid in tqdm(cc_segids, disable=(not progress),
                      desc="Skeletonizing Labels"):
        if segid == 0:
            continue

        # Crop DBF to ROI
        slices = all_slices[segid - 1]
        if slices is None:
            continue

        labels = cc_labels[slices]
        labels = (labels == segid)
        # Mask the DBF down to this component only.
        dbf = (labels * all_dbf[slices]).astype(np.float32)

        roi = Bbox.from_slices(slices)

        skeleton = kimimaro.trace.trace(
            labels, dbf, anisotropy=anisotropy,
            fix_branching=fix_branching, **teasar_params)

        # Translate vertices from ROI-local back to global voxel coords.
        skeleton.vertices[:, 0] += roi.minpt.x
        skeleton.vertices[:, 1] += roi.minpt.y
        skeleton.vertices[:, 2] += roi.minpt.z

        if skeleton.empty():
            continue

        # Report skeletons under the original (pre-cc) segment id,
        # with vertices scaled into physical units.
        orig_segid = remapping[segid]
        skeleton.id = orig_segid
        skeleton.vertices *= anisotropy
        skeletons[orig_segid].append(skeleton)

    # A segment may span several connected components; merge them.
    return merge(skeletons)
import fastremap
import numpy as np

# Demo: convert a large C-ordered volume to Fortran order with fastremap
# and inspect the resulting array's layout.
volume = np.ones((512, 512, 512), dtype=np.float32)
volume = fastremap.asfortranarray(volume)

print(volume)
print(volume.flags)
print(volume.strides)
print(volume.dtype)

# Memory-profiling variant using numpy's own conversion, kept for
# comparison (enable by uncommenting and running under memory_profiler):
# @profile
# def run():
#   x = np.ones((512, 512, 512), dtype=np.uint32, order='C')
#   x += 1
#   print(x.strides, x.flags)
#   y = np.asfortranarray(x)
#   print(x.strides, x.flags)
#   print("done.")
# run()