def defects_to_nodes_from_slice_list(ds, seg_id):
    seg = ds.seg(seg_id)

    # returns the segmentation ids present in the defected slice z
    # (without the ignore value if a segmentation mask is used)
    def defects_to_nodes_z(z):
        defect_nodes_slice = np.unique(seg[:, :, z])
        if ds.has_seg_mask and ds.ignore_seg_value in defect_nodes_slice:
            defect_nodes_slice = defect_nodes_slice[defect_nodes_slice != ds.ignore_seg_value]
        return list(defect_nodes_slice), len(defect_nodes_slice) * [z]

    # extract the defect nodes for all defected slices in parallel
    with futures.ThreadPoolExecutor(max_workers=8) as executor:
        tasks = [executor.submit(defects_to_nodes_z, z) for z in ds.defect_slices]
        defect_nodes = []
        nodes_z = []
        for fut in tasks:
            nodes, zz = fut.result()
            if nodes:
                defect_nodes.extend(nodes)
                nodes_z.extend(zz)

    assert len(defect_nodes) == len(nodes_z)
    defect_nodes = np.array(defect_nodes, dtype='uint32')
    nodes_z = np.array(nodes_z, dtype='uint32')

    # cache the z coordinates of the defect nodes for get_defect_node_z
    save_path = cache_name("defects_to_nodes_from_slice_list", "dset_folder", False, False, ds, seg_id)
    vigra.writeHDF5(nodes_z, save_path, 'nodes_z')
    return defect_nodes

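# Illustrative sketch (not part of the pipeline): the per-slice extraction in
# defects_to_nodes_from_slice_list boils down to np.unique over a 2d slice plus
# masking of an optional ignore value. The helper below and its arguments are
# hypothetical and only document that behaviour; it assumes numpy is imported
# as np at module level, as the functions in this module do.
def _example_defect_nodes_in_slice(seg_slice, ignore_value=None):
    # unique segment ids touching the defected slice
    nodes = np.unique(seg_slice)
    # drop the ignore label if a segmentation mask is used
    if ignore_value is not None:
        nodes = nodes[nodes != ignore_value]
    return nodes
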
def modified_region_features(ds, seg_id, inp_id, uv_ids, lifted_nh):
    modified_features = ds.region_features(seg_id, inp_id, uv_ids, lifted_nh)
    if not ds.defect_slices:
        modified_features = np.c_[
            modified_features,
            np.logical_not(ds.edge_indications(seg_id)).astype('float32')]
        return modified_features

    skip_edges = get_skip_edges(ds, seg_id)
    skip_ranges = get_skip_ranges(ds, seg_id)
    delete_edge_ids = get_delete_edge_ids(ds, seg_id)

    # delete all features corresponding to delete-edges
    modified_features = np.delete(modified_features, delete_edge_ids, axis=0)
    modified_features = np.c_[modified_features, np.ones(modified_features.shape[0])]

    ds._region_statistics(seg_id, inp_id)
    region_statistics_path = cache_name("_region_statistics", "feature_folder", False, False, ds, seg_id, inp_id)

    # add features for the skip edges
    region_stats = vigra.readHDF5(region_statistics_path, 'region_statistics')
    fU = region_stats[skip_edges[:, 0], :]
    fV = region_stats[skip_edges[:, 1], :]
    skip_stat_feats = np.concatenate(
        [np.minimum(fU, fV), np.maximum(fU, fV), np.abs(fU - fV), fU + fV],
        axis=1)

    # features based on region center differences
    region_centers = vigra.readHDF5(region_statistics_path, 'region_centers')
    sU = region_centers[skip_edges[:, 0], :]
    sV = region_centers[skip_edges[:, 1], :]
    skip_center_feats = np.c_[(sU - sV)**2, skip_ranges]

    assert skip_center_feats.shape[0] == skip_stat_feats.shape[0]
    skip_features = np.concatenate([skip_stat_feats, skip_center_feats], axis=1)
    assert skip_features.shape[1] == modified_features.shape[1], "%s, %s" % (
        str(skip_features.shape), str(modified_features.shape))

    return np.concatenate([modified_features, skip_features], axis=0)

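# Illustrative sketch (not part of the pipeline): how the skip-edge statistic
# features in modified_region_features are assembled from per-node region
# statistics. 'stats' is a toy (n_nodes, n_stats) array and 'pairs' a toy
# (n_edges, 2) array of node pairs; both names are hypothetical. For each pair
# (u, v) the features are [min, max, |u - v|, u + v] of the node statistics,
# mirroring skip_stat_feats above.
def _example_skip_stat_features(stats, pairs):
    fU = stats[pairs[:, 0], :]
    fV = stats[pairs[:, 1], :]
    return np.concatenate(
        [np.minimum(fU, fV), np.maximum(fU, fV), np.abs(fU - fV), fU + fV],
        axis=1)
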
def modified_adjacency(ds, seg_id):
    if not ds.defect_slices:
        return np.array([0])
    defect_nodes = defects_to_nodes_from_slice_list(ds, seg_id)
    nodes_z = get_defect_node_z(ds, seg_id)

    # make sure that z is monotonically increasing (not strictly!)
    assert np.all(np.diff(nodes_z.astype(int)) >= 0), "Defected slice index is not increasing monotonically!"
    defect_slices = np.unique(nodes_z)
    defect_node_dict = {
        int(z): defect_nodes[nodes_z == z].astype('uint32').tolist()
        for z in defect_slices
    }

    # FIXME TODO can't do this here once we have individual defect patches
    consecutive_defect_slices = np.split(
        defect_slices, np.where(np.diff(defect_slices) != 1)[0] + 1)
    has_lower_defect_list = []
    for consec in consecutive_defect_slices:
        if len(consec) > 1:
            has_lower_defect_list.extend(consec[1:])

    # iterate over the nodes in slices with defects to get delete, ignore and skip edges
    seg = ds.seg(seg_id)
    edge_indications = ds.edge_indications(seg_id)

    delete_edges = []     # z-edges between defected and non-defected nodes that are deleted from the graph
    delete_edge_ids = []
    ignore_edges = []     # xy-edges between defected and non-defected nodes, set to maximally repulsive weights
    skip_edges = []       # skip edges that run over the defects in z
    skip_ranges = []      # z-distance of the skip edges
    skip_starts = []      # starting slices of the skip edges

    # get the delete and ignore edges by checking which uv-ids have at least one defect node
    uv_ids = ds._adjacent_segments(seg_id)
    defect_uv_indices = find_matching_indices(uv_ids, defect_nodes)
    for defect_index in defect_uv_indices:
        if edge_indications[defect_index]:  # xy-edge -> ignore edge
            ignore_edges.append(uv_ids[defect_index])
        else:  # z-edge -> delete edge
            delete_edges.append(uv_ids[defect_index])
            delete_edge_ids.append(defect_index)

    delete_edges = np.array(delete_edges, dtype='uint32')
    delete_edge_ids = np.array(delete_edge_ids, dtype='uint32')
    ignore_edges = np.array(ignore_edges, dtype='uint32')

    # find the ignore edge ids, corresponding to the ids after the delete edges are removed!
    # first, get the uv-ids after removal of the delete edges
    uv_ids = np.delete(uv_ids, delete_edge_ids, axis=0)
    assert ignore_edges.shape[1] == uv_ids.shape[1]
    matching = find_matching_row_indices(uv_ids, ignore_edges)
    # make sure that all ignore edges were found
    assert matching.shape[0] == ignore_edges.shape[0]
    # get the correctly sorted ids
    ignore_edge_ids = matching[:, 0]
    ignore_edge_ids = ignore_edge_ids[matching[:, 1]]

    for i, z in enumerate(defect_slices):
        print("Processing slice %i: %i / %i" % (z, i, len(defect_slices)))
        defect_nodes_z = defect_node_dict[z]

        # get the skip edges between adjacent slices;
        # skip the first and last slice as well as slices whose lower neighbor is also defected
        has_lower_defect = z in has_lower_defect_list
        if z == 0 or z == seg.shape[2] - 1 or has_lower_defect:
            continue

        skip_edges_z, skip_ranges_z = compute_skip_edges_z(z, seg, defect_node_dict)
        assert len(skip_edges_z) == len(skip_ranges_z)
        skip_edges.extend(skip_edges_z)
        skip_ranges.extend(skip_ranges_z)
        skip_starts.extend(len(skip_edges_z) * [z - 1])

    assert skip_edges, "If we are here, we should have skip edges!"

    skip_edges = np.array(skip_edges, dtype=np.uint32)
    # make the skip edge rows unique
    skip_edges, idx = get_unique_rows(skip_edges, return_index=True)
    skip_ranges = np.array(skip_ranges, dtype=np.uint32)[idx]
    skip_starts = np.array(skip_starts, dtype=np.uint32)[idx]

    # if we have a seg mask, the skip edges can have entries connecting the
    # ignore segment with itself; we need to remove these
    if ds.has_seg_mask:
        duplicate_mask = skip_edges[:, 0] != skip_edges[:, 1]
        if not duplicate_mask.all():  # -> we have entries that will be masked out
            # make sure that all duplicates have the ignore segment value
            assert (skip_edges[np.logical_not(duplicate_mask)] == ds.ignore_seg_value).all()
            print("Removing duplicate skip edges due to ignore segment label")
            skip_edges = skip_edges[duplicate_mask]
            skip_ranges = skip_ranges[duplicate_mask]
            skip_starts = skip_starts[duplicate_mask]

    assert skip_edges.shape[0] == skip_ranges.shape[0]
    assert skip_starts.shape[0] == skip_ranges.shape[0]

    # reorder the skip edges s.t. skip_starts are monotonically increasing
    sort_indices = np.argsort(skip_starts)
    skip_edges = skip_edges[sort_indices]
    skip_ranges = skip_ranges[sort_indices]
    skip_starts = skip_starts[sort_indices]
    # make sure that z is monotonically increasing (not strictly!)
    assert np.all(np.diff(skip_starts.astype(int)) >= 0), "start index of skip edges must increase monotonically."

    # sort the uv ids in skip edges
    skip_edges = np.sort(skip_edges, axis=1)

    # get the modified adjacency:
    # first check if we have any duplicates in the skip edges and uv-ids;
    # this can happen if we have a segmentation mask
    matches = find_matching_row_indices(uv_ids, skip_edges)
    if matches.size:
        assert ds.has_seg_mask, "There should only be duplicates in skip edges and uvs if we have a seg mask"
        # make sure that all removed edges are ignore edges
        assert all(
            (skip_edges[matches[:, 1]] == ds.ignore_seg_value).any(axis=1)
        ), "All duplicate skip edges should connect to an ignore segment"
        print("Removing %i skip edges that were duplicates of uv ids." % len(matches))
        # get a mask for the duplicates
        duplicate_mask = np.ones(len(skip_edges), dtype=bool)
        duplicate_mask[matches[:, 1]] = False
        # remove duplicates from skip edges, ranges and starts
        skip_edges = skip_edges[duplicate_mask]
        skip_ranges = skip_ranges[duplicate_mask]
        skip_starts = skip_starts[duplicate_mask]

    # new modified adjacency
    modified_adjacency = np.concatenate([uv_ids, skip_edges])

    # save delete, ignore and skip edges; a little hacky due to the caching...
    save_path = cache_name("modified_adjacency", "dset_folder", False, False, ds, seg_id)
    vigra.writeHDF5(delete_edges, save_path, "delete_edges")
    vigra.writeHDF5(delete_edge_ids, save_path, "delete_edge_ids")
    vigra.writeHDF5(ignore_edges, save_path, "ignore_edges")
    vigra.writeHDF5(ignore_edge_ids, save_path, "ignore_edge_ids")
    vigra.writeHDF5(skip_edges, save_path, "skip_edges")
    vigra.writeHDF5(skip_ranges, save_path, "skip_ranges")
    vigra.writeHDF5(skip_starts, save_path, "skip_starts")
    return modified_adjacency

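# Hedged sketch: find_matching_row_indices is provided elsewhere in this code
# base; modified_adjacency assumes it returns an (n_matches, 2) array of
# (row index in x, row index in y) pairs for every row of y that also occurs
# in x. A minimal NumPy-only reference implementation under that assumption
# (the real helper is likely vectorised differently):
def _example_find_matching_row_indices(x, y):
    matches = []
    for j, row in enumerate(y):
        # rows of x that are identical to this row of y
        hits = np.where((x == row).all(axis=1))[0]
        matches.extend([[i, j] for i in hits])
    if not matches:
        return np.zeros((0, 2), dtype='int64')
    return np.array(matches, dtype='int64')
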
def get_skip_starts(ds, seg_id):
    modified_adjacency(ds, seg_id)
    mod_save_path = cache_name("modified_adjacency", "dset_folder", False, False, ds, seg_id)
    return vigra.readHDF5(mod_save_path, "skip_starts")


def get_ignore_edge_ids(ds, seg_id):
    modified_adjacency(ds, seg_id)
    mod_save_path = cache_name("modified_adjacency", "dset_folder", False, False, ds, seg_id)
    return vigra.readHDF5(mod_save_path, "ignore_edge_ids")


def get_defect_node_z(ds, seg_id):
    defects_to_nodes_from_slice_list(ds, seg_id)
    save_path = cache_name("defects_to_nodes_from_slice_list", "dset_folder", False, False, ds, seg_id)
    return vigra.readHDF5(save_path, 'nodes_z')

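# Usage sketch (hypothetical dataset object 'ds' with non-empty defect_slices):
# the accessors above first run the corresponding computation, which writes its
# results to the HDF5 cache, and then read the requested dataset back, so they
# can be called independently and in any order.
def _example_load_defect_info(ds, seg_id):
    return {
        'skip_starts': get_skip_starts(ds, seg_id),
        'ignore_edge_ids': get_ignore_edge_ids(ds, seg_id),
        'defect_node_z': get_defect_node_z(ds, seg_id),
    }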