def find_shortest_path(ipl, penaltypower, bounds, disttransf, locmax):
    """Compute shortest paths between all pairs of local maxima of an object.

    The distance transform is converted into a penalty map (inverted,
    background set to infinity, raised to ``penaltypower``) so that the
    computed paths prefer the centers of processes over short-cuts along
    the boundaries.

    :param ipl: Object providing a ``logging(format, *args)`` method.
    :param penaltypower: Exponent applied to the inverted distance transform.
    :param bounds: Bounds argument passed through to ``lib.shortest_paths``.
    :param disttransf: Distance transform of the current object.
    :param locmax: Volume whose non-zero entries mark the local maxima.
    :return: Tuple ``(paths, pathim)`` from ``lib.shortest_paths``.
    """

    # Turn the distance transform into a penalty map:
    # a) Invert it so that the centers of processes carry the lowest
    #    penalty for the shortest path detection.
    disttransf = lib.invert_image(disttransf)
    # b) Pixels outside the process (the former global maximum) must never
    #    be crossed -> infinite penalty.
    disttransf = lib.filter_values(
        disttransf, np.amax(disttransf), type='eq', setto=np.inf)
    # c) Emphasize the contrast between boundary-near and central pixels,
    #    making the paths hug the process centers and avoid short-cuts.
    disttransf = lib.power(disttransf, penaltypower)

    # The local maxima serve as the path endpoints.
    indices = np.where(locmax)
    coords = zip(indices[0], indices[1], indices[2])
    ipl.logging('Local maxima coordinates: {}', coords)

    # Every unordered pair of endpoints becomes one (source, target) task.
    pairs = [(coords[a], coords[b])
             for a in xrange(len(coords) - 1)
             for b in xrange(a + 1, len(coords))]

    paths, pathim = lib.shortest_paths(disttransf, pairs, bounds=bounds, hfp=ipl)

    # NOTE(review): empty-path filtering was present but disabled in the
    # original; kept disabled here to preserve behavior.
    # paths = [x for x in paths if x.any()]

    return paths, pathim
def find_shortest_path(ipl, penaltypower, bounds, disttransf, locmax, labels, labelgroup):
    """Compute shortest paths between local maxima located in different objects.

    The distance transform is turned into a penalty map (inverted, background
    set to infinity, raised to ``penaltypower``) so the paths follow the
    centers of processes.  Endpoint pairs are formed only between local
    maxima that lie in *different* label objects of ``labelgroup``.

    :param ipl: Object providing a ``logging(format, *args)`` method.
    :param penaltypower: Exponent applied to the inverted distance transform.
    :param bounds: Bounds argument passed through to ``lib.shortest_paths``.
    :param disttransf: Distance transform of the current object.
    :param locmax: Volume whose entries > 0 mark the local maxima.
    :param labels: Label volume with the same shape as ``locmax``.
    :param labelgroup: Sequence of label values to pair among each other.
    :return: Tuple ``(paths, pathim)`` from ``lib.shortest_paths``.
    """

    # Modify the distance transform into a penalty map:
    # a) Invert: the lowest values (lowest penalty for the shortest path
    #    detection) should be at the center of the current process.
    disttransf = lib.invert_image(disttransf)
    # b) Set all values outside the process to infinity.
    disttransf = lib.filter_values(
        disttransf, np.amax(disttransf), type='eq', setto=np.inf)
    # c) Increase the value difference between pixels near the boundaries and
    #    pixels central within the processes, so paths avoid short-cuts.
    disttransf = lib.power(disttransf, penaltypower)

    # Extract the local maxima of every label once, up front.  The original
    # implementation re-ran np.where for the inner label on every iteration
    # of the outer loop, scanning the full volume O(len(labelgroup)^2) times;
    # one pass per label is sufficient.
    label_coords = []
    for label in labelgroup:
        idx = np.where((labels == label) & (locmax > 0))
        label_coords.append(zip(idx[0], idx[1], idx[2]))

    # Each partner of a pair has to be located within a different object
    # (label area): pair every maximum of label i with every maximum of
    # label j, for all i < j ("approach 2" of the original implementation).
    pairs = []
    for i in xrange(0, len(label_coords) - 1):
        indices_i = label_coords[i]
        if indices_i:
            for j in xrange(i + 1, len(label_coords)):
                indices_j = label_coords[j]
                if indices_j:
                    ipl.logging('Ind_i = {}\nInd_j = {}', indices_i, indices_j)
                    # Cartesian product of the two coordinate lists.  The
                    # original used a tile-and-sort trick
                    #   zip(indices_i * len(indices_j), sorted(indices_j * len(indices_i)))
                    # this explicit form yields the same pairs in the same
                    # order without sorting a tiled list of size n*m.
                    pairs.extend((ci, cj)
                                 for cj in sorted(indices_j)
                                 for ci in indices_i)

    paths, pathim = lib.shortest_paths(disttransf, pairs, bounds=bounds, hfp=ipl)

    return paths, pathim
def find_shortest_path(ipl, penaltypower, bounds, disttransf, locmax, labels, labelgroup):
    """Shortest paths between local maxima that lie in different label objects.

    The distance transform is reshaped into a penalty map so the paths stick
    to the centers of processes; endpoint pairs are only formed across
    different labels of ``labelgroup``.

    :param ipl: Object providing a ``logging(format, *args)`` method.
    :param penaltypower: Exponent applied to the inverted distance transform.
    :param bounds: Passed through to ``lib.shortest_paths``.
    :param disttransf: Distance transform of the current object.
    :param locmax: Volume whose entries > 0 mark the local maxima.
    :param labels: Label volume with the same shape as ``locmax``.
    :param labelgroup: Sequence of label values to pair among each other.
    :return: Tuple ``(paths, pathim)`` from ``lib.shortest_paths``.
    """

    # --- Penalty map -------------------------------------------------------
    # Invert: lowest penalty at the center of the current process.
    disttransf = lib.invert_image(disttransf)
    # Forbid everything outside the process by assigning infinite penalty.
    disttransf = lib.filter_values(
        disttransf, np.amax(disttransf), type='eq', setto=np.inf)
    # Strengthen the boundary/center contrast to discourage short-cuts.
    disttransf = lib.power(disttransf, penaltypower)

    # --- Pairing -----------------------------------------------------------
    # Each partner of a pair must sit in a different label object: iterate
    # over all label combinations and collect every cross-label pair of
    # local maxima ("approach 2" of the original implementation).
    pairs = []
    num_labels = len(labelgroup)
    for a in xrange(0, num_labels - 1):
        locs_a = np.where((labels == labelgroup[a]) & (locmax > 0))
        locs_a = zip(locs_a[0], locs_a[1], locs_a[2])
        if not locs_a:
            continue
        for b in xrange(a + 1, num_labels):
            locs_b = np.where((labels == labelgroup[b]) & (locmax > 0))
            locs_b = zip(locs_b[0], locs_b[1], locs_b[2])
            if not locs_b:
                continue
            ipl.logging('Ind_i = {}\nInd_j = {}', locs_a, locs_b)
            # Cartesian product via the tile-and-sort idiom: every maximum
            # of label a is paired with every maximum of label b.
            pairs += zip(locs_a * len(locs_b), sorted(locs_b * len(locs_a)))

    paths, pathim = lib.shortest_paths(disttransf, pairs, bounds=bounds, hfp=ipl)

    return paths, pathim
def find_shortest_path(hfp, penaltypower, bounds, disttransf, locmax):
    """Compute the shortest paths between all pairs of local maxima.

    :param hfp: Object providing a ``logging(format, *args)`` method.
    :param penaltypower: Exponent applied to the inverted distance transform.
    :param bounds: Passed through to ``lib.shortest_paths``.
    :param disttransf: Distance transform of the current object.
    :param locmax: Volume whose non-zero entries mark the local maxima.
    :return: ``(paths, pathim)`` as produced by ``lib.shortest_paths``.
    """
    # Modify distancetransform
    #
    # a) Invert: the lowest values (i.e. the lowest penalty for the shortest
    #    path detection) should be at the center of the current process
    disttransf = lib.invert_image(disttransf)
    #
    # b) Set all values outside the process to infinity
    disttransf = lib.filter_values(disttransf, np.amax(disttransf), type='eq', setto=np.inf)
    #
    # c) Increase the value difference between pixels near the boundaries and
    #    pixels central within the processes. This increases the likelihood of
    #    the paths to follow the center of processes, thus avoiding short-cuts
    disttransf = lib.power(disttransf, penaltypower)

    # Get local maxima (coords becomes a list of 3D index tuples)
    indices = np.where(locmax)
    coords = zip(indices[0], indices[1], indices[2])
    hfp.logging('Local maxima coordinates: {}', coords)

    # Make pairwise list of coordinates that will serve as source and target
    pairs = []
    for i in xrange(0, len(coords) - 1):
        for j in xrange(i + 1, len(coords)):
            pairs.append((coords[i], coords[j]))

    paths, pathim = lib.shortest_paths(disttransf, pairs, bounds=bounds, hfp=hfp)

    #
    # Make sure no empty paths lists are returned
    # NOTE(review): the filter below appears commented out in the original;
    # left disabled to preserve behavior.
    # paths = [x for x in paths if x.any()]
    return paths, pathim
def shortest_paths(penaltypower, bounds, lbl, keylist_lblim, gt, disttransf, pathends,
                   for_class=True, correspondence=None, avoid_duplicates=True,
                   max_paths_per_object=None, max_paths_per_object_seed=None,
                   yield_in_bounds=False, return_pathim=True,
                   minimum_alternative_label_count=0, logger=None):
    """Pair endpoints of one labeled object and compute shortest paths between them.

    :param penaltypower: Exponent applied to the inverted distance transform.
    :param bounds: Bounds forwarded to ``lib.shortest_paths``.
    :param lbl: Label of the current object (stored in the correspondence table).
    :param keylist_lblim: Needed for the correspondence table.
    :param gt: Ground truth label volume.
    :param disttransf: Distance transform of the current object.
    :param pathends: Volume marking the path endpoint candidates.
    :param for_class:
        True: paths are computed for when endpoints are in the same ground truth object
        False: paths are computed for when endpoints are in different ground truth objects
    :param correspondence: Previously computed label pairings to skip
        (treated as an empty dict when None; was a mutable default before).
    :param avoid_duplicates: Skip pairings already found in ``correspondence``.
    :param max_paths_per_object: If set, randomly subsample to this many pairings.
    :param max_paths_per_object_seed: Optional seed for the subsampling RNG.
    :param yield_in_bounds: Forwarded to ``lib.shortest_paths``.
    :param return_pathim: Forwarded to ``lib.shortest_paths``.
    :param minimum_alternative_label_count: Paths of merges (for_class=False) are
        removed if too few pixels of the merged object are found.
    :param logger: Optional object with a ``logging(format, *args)`` method.
    :return: ``(kept_paths, new_correspondence, statistics)``
    """
    # None-sentinel replaces the former mutable default argument `{}`;
    # caller-visible behavior is unchanged.
    if correspondence is None:
        correspondence = {}

    # Pick up some statistics along the way
    stats_excluded_paths = 0
    statistics = Rdict()

    # Determine the endpoints of the current object
    indices = np.where(pathends)
    coords = zip(indices[0], indices[1], indices[2])

    # Make pairwise list of coordinates serving as source and target:
    # first determine all pairings ...
    all_pairs = []
    for i in xrange(0, len(coords) - 1):
        for j in xrange(i + 1, len(coords)):
            all_pairs.append((coords[i], coords[j]))
    # ... and only use those that satisfy certain criteria:
    # a) Are in either the same gt object (for_class=True)
    #    or in different gt objects (for_class=False)
    # b) Are not in the correspondence list
    pairs = []
    label_pairs = []
    new_correspondence = {}
    for pair in all_pairs:
        # Determine whether the endpoints are in different gt objects
        if (gt[pair[0]] == gt[pair[1]]) == for_class:
            # Check the correspondence list if this pairing was already
            # computed in a different image
            labelpair = tuple(sorted([gt[pair[0]], gt[pair[1]]]))
            if avoid_duplicates:
                if labelpair not in correspondence:
                    pairs.append(pair)
                    label_pairs.append(labelpair)
                    if logger is not None:
                        logger.logging('Found pairing: {}', labelpair)
                else:
                    if logger is not None:
                        logger.logging('Pairing already in correspondence table: {}', labelpair)
            else:
                pairs.append(pair)
                if logger is not None:
                    logger.logging('Found pairing: {}', labelpair)

    # Select a certain number of pairs if their number is too high
    if max_paths_per_object:
        if len(pairs) > max_paths_per_object:
            if logger is not None:
                logger.logging('Reducing number of pairs to {}', max_paths_per_object)
            if max_paths_per_object_seed:
                random.seed(max_paths_per_object_seed)
            else:
                random.seed()
            # BUGFIX: sample indices instead of the pairs themselves so that
            # label_pairs stays aligned with pairs.  Previously only `pairs`
            # was subsampled, so label_pairs[i] no longer matched path i and
            # wrong entries were written into the correspondence table below.
            sampled = random.sample(xrange(len(pairs)), max_paths_per_object)
            pairs = [pairs[k] for k in sampled]
            if label_pairs:
                label_pairs = [label_pairs[k] for k in sampled]
            if logger is not None:
                logger.logging('Modified pairs list: {}', pairs)

    # If pairs are found that satisfy all conditions
    if pairs:
        if logger is not None:
            logger.logging('Found {} pairings which satisfy all criteria', len(pairs))
        else:
            # print-as-function is valid in Python 2 and 3 for one argument
            print('Found {} pairings which satisfy all criteria'.format(len(pairs)))

        # Pre-processing of the distance transform
        # a) Invert: the lowest values (i.e. the lowest penalty for the
        #    shortest path detection) should be at the center of the current
        #    process
        disttransf = lib.invert_image(disttransf)
        #
        # b) Set all values outside the process to infinity
        disttransf = lib.filter_values(disttransf, np.amax(disttransf), type='eq', setto=np.inf)
        #
        # c) Increase the value difference between pixels near the boundaries
        #    and pixels central within the processes. This increases the
        #    likelihood of the paths to follow the center of processes, thus
        #    avoiding short-cuts
        disttransf = lib.power(disttransf, penaltypower)

        # Compute the shortest paths according to the pairs list
        ps_computed, ps_in_bounds = lib.shortest_paths(
            disttransf, pairs, bounds=bounds, logger=logger,
            return_pathim=return_pathim, yield_in_bounds=yield_in_bounds)

        # Criteria for keeping paths which can only be evaluated after the
        # path computation
        if for_class:
            # A path without merge must not switch labels on the way!
            ps = []
            for i in xrange(0, len(ps_computed)):
                if len(np.unique(gt[ps_in_bounds[i][:, 0],
                                    ps_in_bounds[i][:, 1],
                                    ps_in_bounds[i][:, 2]])) == 1:
                    ps.append(ps_computed[i])
                    if logger is not None:
                        logger.logging('Path label = True')
                    # Add entry to the correspondence table
                    if avoid_duplicates:
                        new_correspondence[label_pairs[i]] = [keylist_lblim, lbl]
                else:
                    # The path switched objects on the way and is not added
                    # to the list
                    if logger is not None:
                        logger.logging(
                            'Path starting and ending in label = {} had multiple labels and was excluded',
                            gt[tuple(ps_in_bounds[i][0])])
                    stats_excluded_paths += 1
        else:
            ps = []
            for i in xrange(0, len(ps_computed)):
                un, counts = np.unique(
                    gt[ps_in_bounds[i][:, 0],
                       ps_in_bounds[i][:, 1],
                       ps_in_bounds[i][:, 2]],
                    return_counts=True)
                # At least two of the entries in counts have to be larger
                # than the threshold
                c = 0
                for count in counts:
                    if count >= minimum_alternative_label_count:
                        c += 1
                        if c > 1:
                            break
                if c > 1:
                    ps.append(ps_computed[i])
                    # Add entry to the correspondence table
                    if avoid_duplicates:
                        new_correspondence[label_pairs[i]] = [keylist_lblim, lbl]
                else:
                    if logger is not None:
                        logger.logging(
                            'Path starting in label {} and ending in {} only crossed one of the labels for {} voxels',
                            gt[tuple(ps_in_bounds[i][0])],
                            gt[tuple(ps_in_bounds[i][-1])],
                            np.min(counts))

        statistics['excluded_paths'] = stats_excluded_paths
        statistics['kept_paths'] = len(ps)
        return ps, new_correspondence, statistics

    else:
        statistics['excluded_paths'] = 0
        statistics['kept_paths'] = 0
        return [], new_correspondence, statistics