def test_read_duplicate_vertex_swc():
    test_file = """
1 0 -18.458370 23.227150 -84.035016 1.000000 -1
2 0 -18.159709 22.925778 -82.984344 1.000000 1
3 0 -17.861047 22.624407 -82.984344 1.000000 2
4 0 -17.562385 22.624407 -82.984344 1.000000 3
5 0 -16.965061 22.021663 -82.984344 1.000000 4
6 0 -16.965061 21.720292 -82.984344 1.000000 5
7 0 -16.069075 21.720292 -82.984344 1.000000 6
8 0 -16.069075 21.117548 -80.883000 1.000000 7
9 0 -15.770414 20.816176 -80.883000 1.000000 8
10 0 -15.770414 20.514805 -80.883000 1.000000 9
11 0 -15.770414 20.816176 -80.883000 1.000000 10
12 0 -16.069075 21.117548 -80.883000 1.000000 11
13 0 -16.069075 21.418920 -80.883000 1.000000 12
14 0 -16.069075 20.816176 -78.781655 1.000000 13
15 0 -15.471752 20.213433 -76.680311 1.000000 14
16 0 -15.471752 19.309318 -76.680311 1.000000 15
17 0 -15.471752 19.007946 -75.629639 1.000000 16
18 0 -15.173090 18.706574 -74.578966 1.000000 17
19 0 -14.874428 18.706574 -74.578966 1.000000 18
20 0 -14.575766 18.405202 -74.578966 1.000000 19
"""
    skel = Skeleton.from_swc(test_file)
    assert skel.vertices.shape[0] == 20

    skel2 = Skeleton.from_swc(skel.to_swc())
    assert skel2.vertices.shape[0] == 20
    assert Skeleton.equivalent(skel, skel2)

def test_caching():
    vol = CloudVolume(
        'file:///tmp/cloudvolume/test-skeletons',
        info=info, cache=True,
    )
    vol.cache.flush()

    skel = Skeleton(
        [
            (0, 0, 0), (1, 0, 0), (2, 0, 0),
            (0, 1, 0), (0, 2, 0), (0, 3, 0),
        ],
        edges=[(0, 1), (1, 2), (3, 4), (4, 5), (3, 5)],
        segid=666,
    )

    vol.skeleton.upload(skel)
    assert vol.cache.list_skeletons() == ['666.gz']

    skel.id = 1
    with open(os.path.join(vol.cache.path, 'skeletons/1'), 'wb') as f:
        f.write(skel.to_precomputed())

    cached_skel = vol.skeleton.get(1)
    assert cached_skel == skel

    vol.cache.flush()

def test_consolidate():
    skel = Skeleton(
        vertices=np.array([
            (0, 0, 0),
            (1, 0, 0),
            (2, 0, 0),
            (0, 0, 0),
            (2, 1, 0),
            (2, 2, 0),
            (2, 2, 1),
            (2, 2, 2),
        ], dtype=np.float32),
        edges=np.array([
            [0, 1],
            [1, 2],
            [2, 3],
            [3, 4],
            [4, 5],
            [5, 6],
            [6, 7],
        ], dtype=np.uint32),
        radii=np.array([0, 1, 2, 3, 4, 5, 6, 7], dtype=np.float32),
        vertex_types=np.array([0, 1, 2, 3, 4, 5, 6, 7], dtype=np.uint8),
    )

    correct_skel = Skeleton(
        vertices=np.array([
            (0, 0, 0),
            (1, 0, 0),
            (2, 0, 0),
            (2, 1, 0),
            (2, 2, 0),
            (2, 2, 1),
            (2, 2, 2),
        ], dtype=np.float32),
        edges=np.array([
            [0, 1],
            [0, 2],
            [0, 3],
            [1, 2],
            [3, 4],
            [4, 5],
            [5, 6],
        ], dtype=np.uint32),
        radii=np.array([0, 1, 2, 4, 5, 6, 7], dtype=np.float32),
        vertex_types=np.array([0, 1, 2, 4, 5, 6, 7], dtype=np.uint8),
    )

    consolidated = skel.consolidate()

    assert np.all(consolidated.vertices == correct_skel.vertices)
    assert np.all(consolidated.edges == correct_skel.edges)
    assert np.all(consolidated.radii == correct_skel.radii)
    assert np.all(consolidated.vertex_types == correct_skel.vertex_types)

def _generate_skeleton(x, min_radius=0):
    """Generate a skeleton (of cloudvolume class) for a given neuron.

    Parameters
    ----------
    x : CatmaidNeuron | TreeNeuron
    min_radius : number
        Radii below this value are clamped to it.

    Returns
    -------
    skeleton : cloudvolume skeleton
    """
    # Flatten the list of segments (sub-trees) and arrange the nodes
    # in segment order.
    nodes_ordered = [n for seg in x.segments for n in seg[::-1]]
    this_tn = x.nodes.set_index('node_id').loc[nodes_ordered]

    # Nodes are repeated across segments: keep only the first occurrence.
    this_tn = this_tn[~this_tn.index.duplicated(keep='first')]
    this_tn['index'] = list(range(1, this_tn.shape[0] + 1))

    # Map treenode id -> index; root nodes (no parent) map to -1.
    tn2ix = this_tn['index'].to_dict()
    this_tn['parent_ix'] = this_tn.parent_id.map(lambda x: tn2ix.get(x, -1))

    vertices = np.array(this_tn[['x', 'y', 'z']].values.tolist(), dtype="float32")
    # Skip the first row (the root) when building edges.
    edges = np.array(this_tn[['index', 'parent_ix']].values[1:] - 1, dtype="uint32")

    skeleton = Skeleton(segid=x.id, vertices=vertices, edges=edges)

    # Clamp radii below min_radius.
    if min_radius is not None:
        this_tn.loc[this_tn.radius < min_radius, 'radius'] = min_radius
    skeleton.radius = np.array(this_tn['radius'].values, dtype="float32")

    # Set the label column to 0 (undefined), then add end/branch labels
    # and, if present, the soma label.
    this_tn['label'] = 0
    this_tn.loc[this_tn.type == 'branch', 'label'] = 5
    this_tn.loc[this_tn.type == 'end', 'label'] = 6
    if x.soma is not None:
        this_tn.loc[x.soma, 'label'] = 1

    skeleton.vertex_types = this_tn.label
    return skeleton

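# A minimal usage sketch for _generate_skeleton, assuming navis is installed
# and that its bundled example neurons expose the `segments`, `nodes`, `soma`,
# and `id` attributes the function relies on. The 30-unit radius floor is
# illustrative, not a recommended default.
def _example_generate_skeleton():
    import navis  # assumption: navis supplies the input TreeNeuron
    neuron = navis.example_neurons(n=1)
    cv_skel = _generate_skeleton(neuron, min_radius=30)
    return cv_skel.to_precomputed()
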
def test_remove_disconnected_vertices():
    skel = Skeleton(
        [
            (0, 0, 0), (1, 0, 0), (2, 0, 0),
            (0, 1, 0), (0, 2, 0), (0, 3, 0),
            (-1, -1, -1),
        ],
        edges=[(0, 1), (1, 2), (3, 4), (4, 5), (3, 5)],
        segid=666,
    )

    res = skel.remove_disconnected_vertices()
    assert res.vertices.shape[0] == 6
    assert res.edges.shape[0] == 5
    assert res.radii.shape[0] == 6
    assert res.vertex_types.shape[0] == 6
    assert res.id == 666

def execute(self):
    corgie_logger.info(
        f"Generate new skeleton vertices task for id {self.skeleton_id_str}"
    )
    skeleton = get_skeleton(self.src_path, self.skeleton_id_str)

    if self.vertex_sort:
        vertex_sort = skeleton.vertices[:, 2].argsort()
    else:
        vertex_sort = np.arange(0, len(skeleton.vertices))

    number_vertices = len(skeleton.vertices)
    index_points = list(range(0, number_vertices, self.task_vertex_size))
    cf = CloudFiles(f"{self.dst_path}")

    array_filenames = []
    for i in range(len(index_points)):
        start_index = index_points[i]
        if i + 1 == len(index_points):
            end_index = number_vertices
        else:
            end_index = index_points[i + 1]
        array_filenames.append(
            f"intermediary_arrays/{self.skeleton_id_str}:{start_index}-{end_index}"
        )

    array_files = cf.get(array_filenames)

    # Dict to make sure arrays are concatenated in the correct order.
    array_dict = {}
    for array_file in array_files:
        array_dict[array_file["path"]] = pickle.loads(array_file["content"])

    array_arrays = []
    for array_filename in array_filenames:
        array_arrays.append(array_dict[array_filename])
    array_arrays = np.concatenate(array_arrays)

    # Restore the original order of the vertices.
    restore_sort = vertex_sort.argsort()
    new_vertices = array_arrays[restore_sort]

    new_skeleton = Skeleton(
        vertices=new_vertices,
        edges=skeleton.edges,
        radii=skeleton.radius,
        vertex_types=skeleton.vertex_types,
        space=skeleton.space,
        transform=skeleton.transform,
    )
    cf.put(
        path=self.skeleton_id_str,
        content=new_skeleton.to_precomputed(),
        compress="gzip",
    )

def remove_ticks(skeleton, threshold):
    """
    Simple merging of individual TEASAR cubes results in lots of
    little ticks due to the edge effect. We can remove them by
    thresholding the path length from a given branch to the
    "main body" of the neurite. We successively remove paths
    from shortest to longest until no branches below threshold remain.

    If TEASAR parameters were chosen such that they allowed
    spines to be traced, this is also an opportunity to correct
    for that.

    This algorithm is O(N^2) in the number of terminal nodes.

    Parameters:
        threshold: The maximum length in nanometers that may be culled.

    Returns: tick-free skeleton
    """
    if skeleton.empty() or threshold == 0:
        return skeleton

    skels = []
    for component in skeleton.components():
        skels.append(_remove_ticks(component, threshold))

    return Skeleton.simple_merge(skels).consolidate(
        remove_disconnected_vertices=False
    )

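# A hedged sketch of remove_ticks on a toy skeleton: a straight backbone
# with one short side branch ("tick") hanging off vertex 1. With a threshold
# longer than the tick's 1-unit path length, the tick should be culled while
# the backbone survives. The threshold value is illustrative.
def _example_remove_ticks():
    skel = Skeleton(
        [(0, 0, 0), (1, 0, 0), (2, 0, 0), (3, 0, 0), (1, 1, 0)],
        edges=[(0, 1), (1, 2), (2, 3), (1, 4)],
    )
    return remove_ticks(skel, threshold=1.5)
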
def _swc2skeleton(self, swc_file, benchmarking=False, origin=None):
    """Converts an SWC file into a Skeleton object.

    Arguments:
        swc_file {str} -- path to SWC file

    Keyword Arguments:
        origin {numpy array with shape (3,1)} -- origin of coordinate frame
            in microns (default: None, assumes (0,0,0) origin)

    Returns:
        skel {cloudvolume.Skeleton} -- Skeleton object of given SWC file
    """
    with open(swc_file, "r") as f:
        contents = f.read()

    # Parse the header: every line that starts with a hashtag.
    comments = [
        i.split(" ") for i in contents.split("\n") if i.startswith("#")
    ]
    offset = np.array(
        [float(j) for i in comments for j in i[2:] if "OFFSET" in i]
    )
    color = [
        float(j) for i in comments for j in i[2].split(",") if "COLOR" in i
    ]
    # Append an alpha channel of 0.0 to the RGB color.
    color.append(0.0)
    color = np.array(color, dtype="float32")

    skel = Skeleton.from_swc(contents)

    # Physical units; space can be 'physical' or 'voxel'.
    skel.space = "physical"

    # Hard-coded parsing of the id from the filename.
    idx = swc_file.find("G")
    if benchmarking:
        # Find the second occurrence of "_".
        idx1 = swc_file.find("_", swc_file.find("_") + 1)
        idx2 = swc_file.find(".")
        skel.id = swc_file[idx1 + 1:idx2]
    else:
        skel.id = int(swc_file[idx + 2:idx + 5])

    # Hard-coded change to the data type of vertex_types.
    skel.extra_attributes[-1]["data_type"] = "float32"
    skel.extra_attributes.append({
        "id": "vertex_color",
        "data_type": "float32",
        "num_components": 4,
    })

    # Add offset to vertices, shift by origin, and convert
    # from microns to nanometers.
    skel.vertices += offset
    if origin is not None:
        skel.vertices -= origin
    skel.vertices *= 1000

    skel.vertex_color = np.zeros((skel.vertices.shape[0], 4), dtype="float32")
    skel.vertex_color[:, :] = color

    return skel

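# A hedged usage sketch for _swc2skeleton. The filename below is hypothetical;
# the id parsing above expects a "G" followed (two characters later) by a
# three-digit id, so "G-002.swc" would yield id 2.
#
#   skel = self._swc2skeleton("G-002.swc", origin=np.array([0.0, 0.0, 0.0]))
#   binary = skel.to_precomputed()
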
def test_components():
    skel = Skeleton(
        [
            (0, 0, 0), (1, 0, 0), (2, 0, 0),
            (0, 1, 0), (0, 2, 0), (0, 3, 0),
        ],
        edges=[(0, 1), (1, 2), (3, 4), (4, 5), (3, 5)],
        segid=666,
    )

    components = skel.components()
    assert len(components) == 2
    assert components[0].vertices.shape[0] == 3
    assert components[1].vertices.shape[0] == 3
    assert components[0].edges.shape[0] == 2
    assert components[1].edges.shape[0] == 3

    skel1_gt = Skeleton([(0, 0, 0), (1, 0, 0), (2, 0, 0)], [(0, 1), (1, 2)])
    skel2_gt = Skeleton([(0, 1, 0), (0, 2, 0), (0, 3, 0)], [(0, 1), (0, 2), (1, 2)])

    assert Skeleton.equivalent(components[0], skel1_gt)
    assert Skeleton.equivalent(components[1], skel2_gt)

def remove_loops(skeleton):
    if skeleton.empty():
        return skeleton

    skels = []
    for component in skeleton.components():
        skels.append(_remove_loops(component))

    return Skeleton.simple_merge(skels).consolidate(
        remove_disconnected_vertices=False
    )

def get_skeletons(self, folder):
    skeleton_filenames = [str(skeleton_id) for skeleton_id in self.skeleton_ids]
    cf = CloudFiles(folder)
    skeleton_files = cf.get(skeleton_filenames)
    skeletons = {}
    for skeleton_file in skeleton_files:
        skeleton_id_str = skeleton_file["path"]
        skeleton = Skeleton.from_precomputed(skeleton_file["content"])
        skeletons[skeleton_id_str] = skeleton
    return skeletons

def remove_dust(skeleton, dust_threshold):
    """Dust threshold in physical cable length."""
    if skeleton.empty() or dust_threshold == 0:
        return skeleton

    skels = []
    for skel in skeleton.components():
        if skel.cable_length() > dust_threshold:
            skels.append(skel)

    return Skeleton.simple_merge(skels)

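# A hedged sketch of remove_dust: two disconnected components, one with
# 200 units of cable and one with 10. A dust_threshold of 100 (same units
# as the vertices; the value is illustrative) should keep only the larger
# component.
def _example_remove_dust():
    skel = Skeleton(
        [(0, 0, 0), (200, 0, 0), (500, 500, 500), (510, 500, 500)],
        edges=[(0, 1), (2, 3)],
    )
    return remove_dust(skel, dust_threshold=100)
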
def test_downsample_joints():
    skel = Skeleton(
        [
            (2, 3, 0),  # 0
            (2, 2, 0),  # 1
            (2, 1, 0),  # 2
            (0, 0, 0), (1, 0, 0), (2, 0, 0), (3, 0, 0), (4, 0, 0),  # 3, 4, 5, 6, 7
            (2, -1, 0),  # 8
            (2, -2, 0),  # 9
            (2, -3, 0),  # 10
        ],
        edges=[
            (0, 1), (1, 2), (2, 5),
            (3, 4), (4, 5), (5, 6), (6, 7),
            (5, 8), (8, 9), (9, 10),
        ],
        radii=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        vertex_types=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        segid=1337,
    )

    ds_skel = skel.downsample(2)
    ds_skel_gt = Skeleton(
        [
            (2, 3, 0),  # 0
            (2, 2, 0),  # 1
            (0, 0, 0), (2, 0, 0), (4, 0, 0),  # 2, 3, 4
            (2, -2, 0),  # 5
            (2, -3, 0),  # 6
        ],
        edges=[(0, 1), (1, 3), (2, 3), (3, 4), (3, 5), (5, 6)],
        radii=[0, 1, 3, 5, 7, 9, 10],
        vertex_types=[0, 1, 3, 5, 7, 9, 10],
        segid=1337,
    )

    assert Skeleton.equivalent(ds_skel, ds_skel_gt)

def test_transform():
    skelv = Skeleton(
        [(0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 3), (2, 1, 3), (2, 2, 3)],
        edges=[(1, 0), (1, 2), (2, 3), (3, 4), (5, 4)],
        radii=[1, 2, 3, 4, 5, 6],
        vertex_types=[1, 2, 3, 4, 5, 6],
        segid=1337,
        transform=np.array([
            [2, 0, 0, 0],
            [0, 2, 0, 0],
            [0, 0, 2, 0],
        ]),
    )

    skelp = skelv.physical_space()
    assert np.all(skelp.vertices == skelv.vertices * 2)
    assert np.all(skelv.vertices == skelp.voxel_space().vertices)

    skelv.transform = [
        [1, 0, 0, 1],
        [0, 1, 0, 2],
        [0, 0, 1, 3],
    ]

    skelp = skelv.physical_space()
    tmpskel = skelv.clone()
    tmpskel.vertices[:, 0] += 1
    tmpskel.vertices[:, 1] += 2
    tmpskel.vertices[:, 2] += 3

    assert np.all(skelp.vertices == tmpskel.vertices)
    assert np.all(skelp.voxel_space().vertices == skelv.vertices)

def view(filename):
    """Visualize a .swc or .npy file."""
    basename, ext = os.path.splitext(filename)

    if ext == ".swc":
        with open(filename, "rt") as swc:
            skel = Skeleton.from_swc(swc.read())
        skel.viewer()
    elif ext == ".npy":
        labels = np.load(filename)
        cloudvolume.view(labels, segmentation=True)
    else:
        print(f"kimimaro: {filename} was not a .swc or .npy file.")

def test_skeleton_fidelity():
    segid = 1822975381
    cv = CloudVolume('gs://seunglab-test/sharded')
    sharded_skel = cv.skeleton.get(segid)

    with SimpleStorage('gs://seunglab-test/sharded') as stor:
        binary = stor.get_file('skeletons/' + str(segid))

    unsharded_skel = Skeleton.from_precomputed(
        binary, segid=1822975381,
        vertex_attributes=cv.skeleton.meta.info['vertex_attributes'],
    )

    assert sharded_skel == unsharded_skel

def test_cable_length():
    skel = Skeleton(
        [(0, 0, 0), (1, 0, 0), (2, 0, 0), (3, 0, 0), (4, 0, 0), (5, 0, 0)],
        edges=[(1, 0), (1, 2), (2, 3), (3, 4), (5, 4)],
        radii=[1, 2, 3, 4, 5, 6],
        vertex_types=[1, 2, 3, 4, 5, 6],
    )
    assert skel.cable_length() == (skel.vertices.shape[0] - 1)

    skel = Skeleton(
        [(2, 0, 0), (1, 0, 0), (0, 0, 0), (0, 5, 0), (0, 6, 0), (0, 7, 0)],
        edges=[(1, 0), (1, 2), (2, 3), (3, 4), (5, 4)],
        radii=[1, 2, 3, 4, 5, 6],
        vertex_types=[1, 2, 3, 4, 5, 6],
    )
    assert skel.cable_length() == 9

    skel = Skeleton(
        [(1, 1, 1), (0, 0, 0), (1, 0, 0)],
        edges=[(1, 0), (1, 2)],
        radii=[1, 2, 3],
        vertex_types=[1, 2, 3],
    )
    assert abs(skel.cable_length() - (math.sqrt(3) + 1)) < 1e-6

def get_skeleton(src_path, skeleton_id_str):
    cf = CloudFiles(src_path)
    return Skeleton.from_precomputed(cf.get(skeleton_id_str))

def test_sharded():
    skel = Skeleton(
        [
            (0, 0, 0), (1, 0, 0), (2, 0, 0),
            (0, 1, 0), (0, 2, 0), (0, 3, 0),
        ],
        edges=[(0, 1), (1, 2), (3, 4), (4, 5), (3, 5)],
        segid=1,
        extra_attributes=[{
            "id": "radius",
            "data_type": "float32",
            "num_components": 1,
        }],
    ).physical_space()

    skels = {}
    for i in range(10):
        sk = skel.clone()
        sk.id = i
        skels[i] = sk.to_precomputed()

    mkdir('/tmp/removeme/skeletons/sharded/skeletons')
    with open('/tmp/removeme/skeletons/sharded/info', 'wt') as f:
        f.write(jsonify(info))

    for idxenc in ('raw', 'gzip'):
        for dataenc in ('raw', 'gzip'):
            spec = ShardingSpecification(
                'neuroglancer_uint64_sharded_v1',
                preshift_bits=1,
                hash='murmurhash3_x86_128',
                minishard_bits=2,
                shard_bits=1,
                minishard_index_encoding=idxenc,
                data_encoding=dataenc,
            )
            skel_info['sharding'] = spec.to_dict()

            with open('/tmp/removeme/skeletons/sharded/skeletons/info', 'wt') as f:
                f.write(jsonify(skel_info))

            files = spec.synthesize_shards(skels)
            for fname in files.keys():
                with open('/tmp/removeme/skeletons/sharded/skeletons/' + fname, 'wb') as f:
                    f.write(files[fname])

            cv = CloudVolume('file:///tmp/removeme/skeletons/sharded/')
            assert cv.skeleton.meta.mip == 3

            for i in range(10):
                sk = cv.skeleton.get(i).physical_space()
                sk.id = 1
                assert sk == skel

            labels = []
            for fname in files.keys():
                lbls = cv.skeleton.reader.list_labels(fname, path='skeletons')
                labels += list(lbls)

            labels.sort()
            assert labels == list(range(10))

            for filename, shard in files.items():
                decoded_skels = cv.skeleton.reader.disassemble_shard(shard)
                for label, binary in decoded_skels.items():
                    Skeleton.from_precomputed(binary)

            exists = cv.skeleton.reader.exists(list(range(11)), path='skeletons')
            assert exists == {
                0: 'skeletons/0.shard',
                1: 'skeletons/0.shard',
                2: 'skeletons/0.shard',
                3: 'skeletons/0.shard',
                4: 'skeletons/0.shard',
                5: 'skeletons/0.shard',
                6: 'skeletons/0.shard',
                7: 'skeletons/0.shard',
                8: 'skeletons/1.shard',
                9: 'skeletons/1.shard',
                10: None,
            }

    shutil.rmtree('/tmp/removeme/skeletons')

def test_simple_merge():
    skel1 = Skeleton(
        [(0, 0, 0), (1, 0, 0), (2, 0, 0)],
        edges=[(0, 1), (1, 2)],
        segid=1,
    )
    skel2 = Skeleton(
        [(0, 0, 1), (1, 0, 2), (2, 0, 3)],
        edges=[(0, 1), (1, 2)],
        segid=1,
    )

    result = Skeleton.simple_merge([skel1, skel2])
    expected = Skeleton(
        [
            (0, 0, 0), (1, 0, 0), (2, 0, 0),
            (0, 0, 1), (1, 0, 2), (2, 0, 3),
        ],
        edges=[(0, 1), (1, 2), (3, 4), (4, 5)],
        segid=1,
    )

    assert result == expected

    skel1.extra_attributes = [{
        "id": "wow",
        "data_type": "uint8",
        "components": 1,
    }]
    skel1.wow = np.array([1, 2, 3], dtype=np.uint8)

    skel2.extra_attributes = [{
        "id": "wow",
        "data_type": "uint8",
        "components": 1,
    }]
    skel2.wow = np.array([4, 5, 6], dtype=np.uint8)

    result = Skeleton.simple_merge([skel1, skel2])
    expected.wow = np.array([1, 2, 3, 4, 5, 6], dtype=np.uint8)

    assert result == expected

    skel2.extra_attributes[0]['data_type'] = np.uint8
    try:
        Skeleton.simple_merge([skel1, skel2])
        assert False
    except SkeletonAttributeMixingError:
        pass

    skel2.extra_attributes[0]['data_type'] = 'uint8'
    skel2.extra_attributes.append({
        "id": "amaze",
        "data_type": "float32",
        "components": 2,
    })
    try:
        Skeleton.simple_merge([skel1, skel2])
        assert False
    except SkeletonAttributeMixingError:
        pass

def uploadskeletons(skelsource, skelseglist, skelnamelist, path):
    """Upload skeletons (of cloudvolume class) to a local server.

    Parameters
    ----------
    skelsource : list of cloudvolume skeletons
    skelseglist : list of segids (skids)
    skelnamelist : list of skeleton names
    path : path to the local data server

    Returns
    -------
    cv : cloudvolume class object
    """
    info = {
        "@type": "neuroglancer_skeletons",
        "transform": skelsource[0].transform.flatten(),
        "vertex_attributes": [{
            "id": "radius",
            "data_type": "float32",
            "num_components": 1,
        }],
        "scales": "um",
    }
    path = 'file://' + path + '/precomputed'
    cv = CloudVolume(path, info=info)

    # Prepare the info file.
    cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons'
    cv.skeleton.meta.info['transform'] = skelsource[0].transform.flatten()
    cv.skeleton.meta.info['vertex_attributes'] = [{
        'id': 'radius',
        'data_type': 'float32',
        'num_components': 1,
    }]
    del cv.skeleton.meta.info['sharding']
    del cv.skeleton.meta.info['spatial_index']

    cv.skeleton.meta.info['segment_properties'] = 'seg_props'
    cv.skeleton.meta.commit_info()

    files = [
        os.path.join(cv.skeleton.meta.skeleton_path, str(skel.id))
        for skel in skelsource
    ]
    for fileidx in range(len(files)):
        fullfilepath = os.path.join(
            cv.basepath, os.path.basename(path), files[fileidx]
        )
        uploadskel = Skeleton(
            vertices=skelsource[fileidx].vertices,
            edges=skelsource[fileidx].edges,
        )
        print(fullfilepath)
        with open(fullfilepath, 'wb') as f:
            f.write(uploadskel.to_precomputed())

    segfilepath = os.path.join(
        cv.basepath, os.path.basename(path),
        cv.skeleton.meta.skeleton_path, 'seg_props',
    )
    if not os.path.exists(segfilepath):
        os.makedirs(segfilepath)
        print('creating:', segfilepath)

    allsegproplist = []
    for segid in skelseglist:
        segpropdict = {}
        segpropdict['id'] = segid
        segpropdict['type'] = 'label'
        segpropdict['values'] = skelnamelist
        allsegproplist.append(segpropdict)

    seginfo = {
        "@type": "neuroglancer_segment_properties",
        "inline": {
            "ids": skelseglist,
            "properties": allsegproplist,
        },
    }
    segfile = os.path.join(segfilepath, 'info')
    with open(segfile, 'w') as segfile:
        json.dump(seginfo, segfile)

    return cv

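# A hedged usage sketch for uploadskeletons. The path and names are
# hypothetical; `path` should point at a directory served by your local
# data server, and each skeleton must already carry an id.
#
#   skel = Skeleton([(0, 0, 0), (1000, 0, 0)], edges=[(0, 1)], segid=10)
#   cv = uploadskeletons([skel], ['10'], ['neuron_a'], '/tmp/skeleton_server')
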
def uploadshardedskeletons(skelsource, skelseglist, skelnamelist, path):
    """Upload sharded skeletons to a local server.

    Parameters
    ----------
    skelsource : list of cloudvolume skeletons
    skelseglist : list of segids (skids)
    skelnamelist : list of skeleton names
    path : path to the local data server

    Returns
    -------
    cv : cloudvolume class object
    """
    info = {
        "@type": "neuroglancer_skeletons",
        "transform": skelsource[0].transform.flatten(),
        "vertex_attributes": [{
            "id": "radius",
            "data_type": "float32",
            "num_components": 1,
        }],
        "scales": "um",
    }
    path = 'file://' + path + '/precomputed'
    cv = CloudVolume(path, info=info)

    # Prepare the info file.
    cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons'
    cv.skeleton.meta.info['transform'] = skelsource[0].transform.flatten()
    cv.skeleton.meta.info['vertex_attributes'] = [{
        'id': 'radius',
        'data_type': 'float32',
        'num_components': 1,
    }]

    # Prepare the sharding info.
    spec = ShardingSpecification(
        'neuroglancer_uint64_sharded_v1',
        preshift_bits=9,
        hash='murmurhash3_x86_128',
        minishard_bits=6,
        shard_bits=15,
        minishard_index_encoding='raw',
        data_encoding='raw',
    )
    cv.skeleton.meta.info['sharding'] = spec.to_dict()

    cv.skeleton.meta.info['segment_properties'] = 'seg_props'
    cv.skeleton.meta.commit_info()

    precomputedskels = {}
    for skelidx in range(len(skelsource)):
        skelid = int(skelsource[skelidx].id)
        skel = Skeleton(
            skelsource[skelidx].vertices,
            edges=skelsource[skelidx].edges,
            segid=skelid,
            extra_attributes=[{
                "id": "radius",
                "data_type": "float32",
                "num_components": 1,
            }],
        ).physical_space()
        precomputedskels[skelid] = skel.to_precomputed()

    shardfiles = spec.synthesize_shards(precomputedskels)
    shardedfilepath = os.path.join(
        cv.basepath, os.path.basename(path), cv.skeleton.meta.skeleton_path
    )
    for fname in shardfiles.keys():
        with open(shardedfilepath + '/' + fname, 'wb') as f:
            f.write(shardfiles[fname])

    segfilepath = os.path.join(
        cv.basepath, os.path.basename(path),
        cv.skeleton.meta.skeleton_path, 'seg_props',
    )
    if not os.path.exists(segfilepath):
        os.makedirs(segfilepath)
        print('creating:', segfilepath)

    allsegproplist = []
    for segid in skelseglist:
        segpropdict = {}
        segpropdict['id'] = segid
        segpropdict['type'] = 'label'
        segpropdict['values'] = skelnamelist
        allsegproplist.append(segpropdict)

    seginfo = {
        "@type": "neuroglancer_segment_properties",
        "inline": {
            "ids": skelseglist,
            "properties": allsegproplist,
        },
    }
    segfile = os.path.join(segfilepath, 'info')
    with open(segfile, 'w') as segfile:
        json.dump(seginfo, segfile)

    return cv

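# A hedged usage sketch for uploadshardedskeletons, mirroring the unsharded
# call above; all names and the path are hypothetical. The sharded layout
# packs every skeleton into a few shard files instead of one file per segid,
# which is friendlier to filesystems when there are many skeletons.
#
#   skels = [Skeleton([(0, 0, 0), (1000, 0, 0)], edges=[(0, 1)], segid=i)
#            for i in range(1, 4)]
#   cv = uploadshardedskeletons(skels, ['1', '2', '3'],
#                               ['neuron_a', 'neuron_b', 'neuron_c'],
#                               '/tmp/skeleton_server')
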
def test_read_swc():
    # From http://research.mssm.edu/cnic/swc.html
    test_file = """# ORIGINAL_SOURCE NeuronStudio 0.8.80
# CREATURE
# REGION
# FIELD/LAYER
# TYPE
# CONTRIBUTOR
# REFERENCE
# RAW
# EXTRAS
# SOMA_AREA
# SHRINKAGE_CORRECTION 1.0 1.0 1.0
# VERSION_NUMBER 1.0
# VERSION_DATE 2007-07-24
# SCALE 1.0 1.0 1.0
1 1 14.566132 34.873772 7.857000 0.717830 -1
2 0 16.022520 33.760513 7.047000 0.463378 1
3 5 17.542000 32.604973 6.885001 0.638007 2
4 0 19.163984 32.022469 5.913000 0.602284 3
5 0 20.448090 30.822802 4.860000 0.436025 4
6 6 21.897903 28.881084 3.402000 0.471886 5
7 0 18.461960 30.289471 8.586000 0.447463 3
8 6 19.420759 28.730757 9.558000 0.496217 7"""

    skel = Skeleton.from_swc(test_file)
    assert skel.vertices.shape[0] == 8
    assert skel.edges.shape[0] == 7

    skel_gt = Skeleton(
        vertices=[
            [14.566132, 34.873772, 7.857000],
            [16.022520, 33.760513, 7.047000],
            [17.542000, 32.604973, 6.885001],
            [19.163984, 32.022469, 5.913000],
            [20.448090, 30.822802, 4.860000],
            [21.897903, 28.881084, 3.402000],
            [18.461960, 30.289471, 8.586000],
            [19.420759, 28.730757, 9.558000],
        ],
        edges=[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (2, 6), (7, 6)],
        radii=[
            0.717830, 0.463378, 0.638007, 0.602284,
            0.436025, 0.471886, 0.447463, 0.496217,
        ],
        vertex_types=[1, 0, 5, 0, 0, 6, 0, 6],
    )
    assert Skeleton.equivalent(skel, skel_gt)

    skel = Skeleton.from_swc(skel.to_swc())
    assert np.all(np.abs(skel.vertices - skel_gt.vertices) < 0.00001)

    # consolidate() sorts edges
    skel = skel.consolidate()
    skel_gt = skel_gt.consolidate()
    assert np.all(skel.edges == skel_gt.edges)
    assert np.all(np.abs(skel.radii - skel_gt.radii) < 0.00001)

    Nv = skel.vertices.shape[0]
    Ne = skel.edges.shape[0]

    for _ in range(10):
        skel = Skeleton.from_swc(skel.to_swc())
        assert skel.vertices.shape[0] == Nv
        assert skel.edges.shape[0] == Ne

def join_close_components(skeletons, radius=None):
    """
    Given a set of skeletons which may contain multiple connected
    components, attempt to connect each component to the nearest
    other component via the nearest two vertices. Repeat until no
    components remain or no points closer than `radius` are available.

    radius: float in same units as skeletons

    Returns: Skeleton
    """
    if radius is not None and radius <= 0:
        raise ValueError("radius must be greater than zero: " + str(radius))

    if isinstance(skeletons, Skeleton):
        skeletons = [skeletons]

    skels = []
    for skeleton in skeletons:
        skels += skeleton.components()

    skels = [skl.consolidate() for skl in skels if not skl.empty()]

    if len(skels) == 1:
        return skels[0]
    elif len(skels) == 0:
        return Skeleton()

    while len(skels) > 1:
        N = len(skels)

        radii_matrix = np.zeros((N, N), dtype=np.float32) + np.inf
        # uint32 sentinel: adding -1 wraps to the maximum value.
        index_matrix = np.zeros((N, N, 2), dtype=np.uint32) + -1

        for i in range(len(skels)):
            for j in range(len(skels)):
                if i == j:
                    continue
                elif radii_matrix[i, j] != np.inf:
                    continue

                s1, s2 = skels[i], skels[j]
                dist_matrix = scipy.spatial.distance.cdist(s1.vertices, s2.vertices)
                radii_matrix[i, j] = np.min(dist_matrix)
                radii_matrix[j, i] = radii_matrix[i, j]

                index_matrix[i, j] = np.unravel_index(
                    np.argmin(dist_matrix), dist_matrix.shape
                )
                index_matrix[j, i] = index_matrix[i, j]

        if np.all(radii_matrix == np.inf):
            break

        min_radius = np.min(radii_matrix)
        if radius is not None and min_radius > radius:
            break

        i, j = np.unravel_index(np.argmin(radii_matrix), radii_matrix.shape)
        s1, s2 = skels[i], skels[j]
        fused = Skeleton.simple_merge([s1, s2])
        fused.edges = np.concatenate([
            fused.edges,
            [[index_matrix[i, j, 0], index_matrix[i, j, 1] + s1.vertices.shape[0]]],
        ])
        skels[i] = None
        skels[j] = None
        skels = [_ for _ in skels if _ is not None] + [fused]

    return Skeleton.simple_merge(skels).consolidate()

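# A hedged sketch of join_close_components: two separate line segments whose
# nearest endpoints are 1 unit apart. With radius=2 (illustrative), the
# components should be bridged into one connected skeleton; with radius=0.5
# they would remain separate.
def _example_join_close_components():
    skel = Skeleton(
        [(0, 0, 0), (1, 0, 0), (2, 0, 0), (3, 0, 0)],
        edges=[(0, 1), (2, 3)],
    )
    joined = join_close_components(skel, radius=2)
    return len(joined.components())  # expected: 1
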
def test_downsample():
    skel = Skeleton(
        [(0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 3), (2, 1, 3), (2, 2, 3)],
        edges=[(1, 0), (1, 2), (2, 3), (3, 4), (5, 4)],
        radii=[1, 2, 3, 4, 5, 6],
        vertex_types=[1, 2, 3, 4, 5, 6],
        segid=1337,
    )

    def should_error(x):
        try:
            skel.downsample(x)
            assert False
        except ValueError:
            pass

    should_error(-1)
    should_error(0)
    should_error(.5)
    should_error(2.00000000000001)

    dskel = skel.downsample(1)
    assert Skeleton.equivalent(dskel, skel)
    assert dskel.id == skel.id
    assert dskel.id == 1337

    dskel = skel.downsample(2)
    dskel_gt = Skeleton(
        [(0, 0, 0), (1, 1, 0), (2, 1, 3), (2, 2, 3)],
        edges=[(1, 0), (1, 2), (2, 3)],
        radii=[1, 3, 5, 6],
        vertex_types=[1, 3, 5, 6],
    )
    assert Skeleton.equivalent(dskel, dskel_gt)

    dskel = skel.downsample(3)
    dskel_gt = Skeleton(
        [(0, 0, 0), (1, 1, 3), (2, 2, 3)],
        edges=[(1, 0), (1, 2)],
        radii=[1, 4, 6],
        vertex_types=[1, 4, 6],
    )
    assert Skeleton.equivalent(dskel, dskel_gt)

    skel = Skeleton(
        [(0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 3), (2, 1, 3), (2, 2, 3)],
        edges=[(1, 0), (1, 2), (3, 4), (5, 4)],
        radii=[1, 2, 3, 4, 5, 6],
        vertex_types=[1, 2, 3, 4, 5, 6],
    )
    dskel = skel.downsample(2)
    dskel_gt = Skeleton(
        [(0, 0, 0), (1, 1, 0), (1, 1, 3), (2, 2, 3)],
        edges=[(1, 0), (2, 3)],
        radii=[1, 3, 4, 6],
        vertex_types=[1, 3, 4, 6],
    )
    assert Skeleton.equivalent(dskel, dskel_gt)

def test_equivalent():
    assert Skeleton.equivalent(Skeleton(), Skeleton())

    identity = Skeleton([(0, 0, 0), (1, 0, 0)], [(0, 1)])
    assert Skeleton.equivalent(identity, identity)

    diffvertex = Skeleton([(0, 0, 0), (0, 1, 0)], [(0, 1)])
    assert not Skeleton.equivalent(identity, diffvertex)

    single1 = Skeleton([(0, 0, 0), (1, 0, 0)], edges=[(1, 0)])
    single2 = Skeleton([(0, 0, 0), (1, 0, 0)], edges=[(0, 1)])
    assert Skeleton.equivalent(single1, single2)

    double1 = Skeleton([(0, 0, 0), (1, 0, 0)], edges=[(1, 0)])
    double2 = Skeleton([(0, 0, 0), (1, 0, 0)], edges=[(0, 1)])
    assert Skeleton.equivalent(double1, double2)

    double1 = Skeleton([(0, 0, 0), (1, 0, 0), (1, 1, 0)], edges=[(1, 0), (1, 2)])
    double2 = Skeleton([(0, 0, 0), (1, 0, 0), (1, 1, 0)], edges=[(2, 1), (0, 1)])
    assert Skeleton.equivalent(double1, double2)

    double1 = Skeleton(
        [(0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 3)],
        edges=[(1, 0), (1, 2), (1, 3)],
    )
    double2 = Skeleton(
        [(0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 3)],
        edges=[(3, 1), (2, 1), (0, 1)],
    )
    assert Skeleton.equivalent(double1, double2)