Example #1
 def update(self, time_passed):
     epsilon = 0.01
     time_passed = time_passed if time_passed != 0 else epsilon
     for i in range(self.steps_per_frame):
         milliseconds = float(time_passed) / self.steps_per_frame
         if _DEBUG_PRINT_MS:
             self.mss += [milliseconds]
             _mss = self.mss[10:]
             if len(_mss) % 100:
                 print("milliseconds=%s" % _mss[-1])
         if self.coord_changed:
             point_matrix = numpy.array([d.coord for d in self.devices])
             kdtree = KDTree(point_matrix)
             for d in self.devices:
                 d._nbrs = []
             for (i, j) in kdtree.query_pairs(self.radio_range):
                 self.devices[i]._nbrs += [self.devices[j]]
                 self.devices[j]._nbrs += [self.devices[i]]
             for d in self.devices:
                 d._nbr_range = _Field()
                 for n in d._nbrs + [d]:
                     delta = d.coord - n.coord
                     d._nbr_range[n] = numpy.dot(delta, delta)**0.5
         self.coord_changed = False
         for d in self.devices:
             d.dostep(milliseconds)
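Every snippet on this page is built around scipy.spatial.KDTree.query_pairs, which returns the set of index pairs (i, j), with i < j, whose points lie within radius r of each other. A minimal sketch of those semantics, with made-up points:

import numpy as np
from scipy.spatial import KDTree

points = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
tree = KDTree(points)
print(tree.query_pairs(r=0.5))  # {(0, 1)} -- only the first two points are close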
Example #2
 def update(self, time_passed):
     epsilon = 0.01
     time_passed = time_passed if time_passed != 0 else epsilon
     for i in range(self.steps_per_frame):
         milliseconds = float(time_passed) / self.steps_per_frame
         if _DEBUG_PRINT_MS:
             self.mss += [milliseconds]
             _mss = self.mss[10:]
             if len(_mss) % 100:
                 print("milliseconds=%s" % _mss[-1])
         if self.coord_changed:
             point_matrix = numpy.array([d.coord for d in self.devices])
             kdtree = KDTree(point_matrix)
             for d in self.devices:
                 d._nbrs = []
             for (i, j) in kdtree.query_pairs(self.radio_range):
                 self.devices[i]._nbrs += [self.devices[j]]
                 self.devices[j]._nbrs += [self.devices[i]]
             for d in self.devices:
                 d._nbr_range = _Field()
                 d._nbr_vec = _Field()
                 for n in d._nbrs + [d]:
                     delta = n.coord - d.coord
                     d._nbr_range[n] = numpy.dot(delta, delta) ** 0.5
                     d._nbr_vec[n] = delta
         self.coord_changed = False
         for d in self.devices:
             d.dostep(milliseconds)
Example #3
    def calc_mask_unique(self, thr=3):
        m = np.zeros(shape=self.points2d.shape, dtype=bool)  # np.bool was removed in NumPy 1.24
        size_list = []
        for j in range(self.points2d.shape[1]):
            mask = np.zeros(self.points2d.shape, bool)
            mask[:, j, :] = True
            # mask = np.logical_and(mask, self.points2d != 0)
            if np.sum(mask) == 0:
                continue

            r, c, _ = np.nonzero(mask)
            r = r[::2]
            c = c[::2]

            pts = self[mask]
            kd_tree = KDTree(pts)
            res = kd_tree.query_pairs(r=thr, p=2)
            for (p1, p2) in res:
                if mask[r[p1], c[p1], 0] and mask[r[p2], c[p2], 0]:
                    mask[r[p2], c[p2], :] = False

            size_list.append(np.sum(mask) / 2.0)
            m = np.logical_or(m, mask)
        self.mask_unique = m
        print("Camera {} after pruning: {}".format(self.cam_id, size_list))
        return m
Example #4
def daves_super_saturate(atoms):
    pos = atoms.get_positions()
    tree = KDTree(pos)
    list_tree = list(tree.query_pairs(1.430))
    bondedTo = [[] for i in range(len(atoms))]

    for bond in list_tree:
        bondedTo[bond[0]].append(bond[1])
        bondedTo[bond[1]].append(bond[0])

    Zs = atoms.get_atomic_numbers()
    # figure out what needs a hydrogen atom
    for iatom in range(len(atoms)):
        nbonds = len(bondedTo[iatom])
        Z = Zs[iatom]
        if (Z, nbonds) == (6, 2):
            print("we should add H to atom", iatom)

            r0 = pos[iatom, :]
            bond1 = pos[bondedTo[iatom][0], :] - r0
            bond2 = pos[bondedTo[iatom][1], :] - r0
            rH = -(bond1 + bond2)
            rH = 1.09 * rH / np.linalg.norm(rH)
            atoms.append(Atom('H', r0 + rH))
Example #5
def gravity_model_contact_events(agents: List[Agent],
                                 positions: np.ndarray,
                                 env: simpy.Environment,
                                 rng: np.random.Generator):
    tree = KDTree(data=positions)
    close_pairs = list(tree.query_pairs(r=contact_distance_upper_bound))
    inverse_distances = np.array(
        [np.linalg.norm(positions[idx1] - positions[idx2]) ** -contact_rate_gravity_exponent
         for idx1, idx2 in close_pairs])
    inverse_distances /= inverse_distances.sum()

    while True:
        choices = rng.choice(a=close_pairs, p=inverse_distances, size=len(agents)).tolist()
        for choice in choices:
            yield env.timeout(delay=rng.exponential(scale=1 / len(agents) / contact_rate_per_individual))
            contact_agents = [agents[idx]
                              for idx in choice
                              if not agents[idx].state.symptomatic()
                              or rng.uniform() > p_symptomatic_individual_isolates]

            if len(contact_agents) < 2:
                # Symptomatic self-isolation means this is no longer a contact event and doesn't need
                # recording. Skip to the next event.
                continue

            infected = get_infected(contact_agents, rng=rng)
            for i in infected:
                env.process(generator=infection_events(env=env, infected=i, rng=rng))
Example #6
def build_bonded_to(pos):
    tree = KDTree(pos)
    list_tree = list(tree.query_pairs(1.430))
    bondedTo = [[] for i in range(len(pos))]
    for bond in list_tree:
        bondedTo[bond[0]].append(bond[1])
        bondedTo[bond[1]].append(bond[0])
    return bondedTo
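A quick usage sketch for build_bonded_to above (assuming numpy and scipy.spatial.KDTree are imported); atoms 0 and 1 of this made-up position array sit within the 1.430 bond cutoff:

import numpy as np
pos = np.array([[0.0, 0.0, 0.0], [1.4, 0.0, 0.0], [5.0, 0.0, 0.0]])
print(build_bonded_to(pos))  # [[1], [0], []]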
Example #7
def cluster_over_time_with_merging(df, thresh, outfi=None):
    """ Cluster a set of fire detections over time
    :param df: DataFrame of active fire detections with x and y variables
    :param thresh: threshold distance for clustering
    :param outfi: optional output file prefix for pickling the results
    :return: (df, merge_dict): the DataFrame with a cluster id added on and a dictionary of merged clusters
    """
    min_year = int(np.min(df.year))
    max_year = int(np.max(df.year))
    clust_dict = dict()
    # merge_dict_dict = dict()
    clust2nodes, nodes2clust, merge_dict = (None, None, None)

    for year in xrange(min_year, max_year + 1):
        # Build up dictionary of nearest neighbors to make the building process easier
        annual_fires = df[df.year == year]
        annual_kd = KDTree(np.column_stack((annual_fires.x, annual_fires.y)))
        pairs_list = annual_kd.query_pairs(thresh)
        print "len of pairs list: " + str(len(pairs_list))
        neighbors_dict = dict()
        for i, j in pairs_list:
            i_name, j_name = (annual_fires.index[i], annual_fires.index[j])
            if i_name not in neighbors_dict:
                neighbors_dict[i_name] = set()
            neighbors_dict[i_name].add(j_name)
            if j_name not in neighbors_dict:
                neighbors_dict[j_name] = set()
            neighbors_dict[j_name].add(i_name)
        print "done building dict"
        # Get initial clusters on day 1
        first_day = max(FIRE_SEASON[0], np.min(annual_fires.dayofyear))
        last_day = min(FIRE_SEASON[1], np.max(annual_fires.dayofyear))

        for day in xrange(first_day, last_day):
            daily_fires = annual_fires[annual_fires.dayofyear == day]
            clust2nodes, nodes2clust, merge_dict = find_daily_clusters(
                daily_fires,
                neighbors_dict,
                clust2nodes=clust2nodes,
                nodes2clust=nodes2clust,
                merge_dict=merge_dict)
            for node, clust in nodes2clust.iteritems():
                if node not in clust_dict:
                    clust_dict[node] = clust

        # merge_dict_dict[year] = merge_dict
        print "%d merges in year %d" % (len(merge_dict), year)
    df['cluster'] = pd.Series(clust_dict, dtype=int)

    if outfi:
        with open(outfi + "_cluster.pkl", "w") as fout:
            cPickle.dump(df, fout, cPickle.HIGHEST_PROTOCOL)
        with open(outfi + "_merge_dict.pkl", "w") as fout:
            cPickle.dump(merge_dict, fout, cPickle.HIGHEST_PROTOCOL)
    return df, merge_dict
Example #8
def image_to_graph(data):  # image data as input
    graph = nx.Graph()  # un-directed empty graph to be filled

    pixels = np.transpose(np.nonzero(data))  # nonzero pixels, row,col format
    kdt = KDTree(pixels)
    pairs = kdt.query_pairs(1.5)  #set of tuples of pair indices incl diagonal
    for pair in pairs:
        graph.add_edge(tuple(pixels[pair[0]]), tuple(pixels[pair[1]]))
    return graph
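A usage sketch for image_to_graph (assuming numpy, networkx as nx, and scipy.spatial.KDTree are imported as the snippet implies); the 1.5 radius links both 4- and 8-connected pixels:

import numpy as np
data = np.array([[1, 1, 0],
                 [0, 1, 0],
                 [0, 0, 1]])
g = image_to_graph(data)
print(g.number_of_nodes(), g.number_of_edges())  # 4 4 -- diagonal pixels are joined too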
Example #9
    def gen_edges(self, radius):
        self.radius = radius
        tree = KDTree(self.nodes)

        edges = tree.query_pairs(radius, output_type='ndarray')
        edges = np.concatenate((edges, np.flip(edges, 1)))

        self.receivers = torch.tensor(edges[:, 0], dtype=torch.int64)
        self.senders = torch.tensor(edges[:, 1], dtype=torch.int64)
        self.n_edges = self.receivers.size(0)
        self.edges = torch.zeros(self.n_edges, 1)
Example #10
def rand_points():
    N = 90
    pts = 4000 * np.random.random((N, 2))
    pts = pts.astype(int)

    tree = KDTree(pts)
    too_close = tree.query_pairs(220)
    too_close = [i[0] for i in too_close]
    pts = np.delete(pts, too_close, 0)
    print(f'Number of particles: {pts.shape[0]}')
    return pts
Example #11
 def radius_nn_graph(self, radius):
     # Case 1 - Sensors communicate their info only within a particular radius of themselves
     tree_obj = KDTree(self.dist)
     pair_points = tree_obj.query_pairs(radius, p=2.0, eps=0)
     pair_points_list = list(pair_points)
     for i in range(len(pair_points)):
         pta, ptb = pair_points_list[i]
         dist_pts = norm(self.dist[pta] - self.dist[ptb], 2)
         self.dict_index[pta].append(ptb)
         self.dict_index[ptb].append(pta)
         self.dict_weight[pta].append(dist_pts)
         self.dict_weight[ptb].append(dist_pts)
Example #13
def thinning(coords, thinning_distance):
    """
    Removes points from coords such that no two points remain within thinning_distance of each other
    """
    kdtree = KDTree(coords)
    removed = set()

    for a, b in sorted(kdtree.query_pairs(thinning_distance)):
        # At least one of a or b should be removed:
        if a not in removed and b not in removed:
            removed.add(b)

    return np.array([coord for i, coord in enumerate(coords) if i not in removed])
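A usage sketch for thinning with made-up input (assuming numpy and scipy.spatial.KDTree are imported); as the docstring promises, no two surviving points remain within the threshold of each other:

import numpy as np
rng = np.random.default_rng(0)
coords = rng.random((200, 2))
thinned = thinning(coords, thinning_distance=0.1)
print(len(coords), '->', len(thinned))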
Example #14
 def gaussian_nn_graph(self, radius):
     # Case 4 - A gaussian communication based on distance, (probabilistic)
     tree_obj = KDTree(self.dist)
     pair_points = tree_obj.query_pairs(radius, p=2.0, eps=0)
     pair_points_list = list(pair_points)
     for i in range(len(pair_points)):
         pta, ptb = pair_points_list[i]
         dist_pts = norm(self.dist[pta] - self.dist[ptb], 2)
         if dist_pts < rand(1):
             self.dict_index[pta].append(ptb)
             self.dict_index[ptb].append(pta)
             self.dict_weight[pta].append(dist_pts)
             self.dict_weight[ptb].append(dist_pts)
Example #16
def compute_coordination_number_by_cutoff_distance(points_input, radius_input):
    maxdistance = 3.0 * radius_input[0]
    kdtree = KDTree(points_input)
    pairs = list(kdtree.query_pairs(maxdistance))
    cutoff_bonds = []
    for a in range(len(points_input)):
        cutoff_bonds.append([])
    for a in range(len(pairs)):
        cutoff_bonds[pairs[a][0]].append(pairs[a][1])
        cutoff_bonds[pairs[a][1]].append(pairs[a][0])
    coordination_number_by_cutoff_distance_in = []
    for a in range(len(cutoff_bonds)):
        coordination_number_by_cutoff_distance_in.append(len(cutoff_bonds[a]))
    return coordination_number_by_cutoff_distance_in
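The two bookkeeping loops above can be collapsed with numpy. A sketch of an equivalent, vectorized coordination count under the same inputs (not the original author's code):

import numpy as np
from scipy.spatial import KDTree

def coordination_numbers(points_input, radius_input):
    maxdistance = 3.0 * radius_input[0]
    pairs = np.array(list(KDTree(points_input).query_pairs(maxdistance)), dtype=int)
    counts = np.zeros(len(points_input), dtype=int)
    if pairs.size:
        # each pair contributes one bond to both of its endpoints
        np.add.at(counts, pairs[:, 0], 1)
        np.add.at(counts, pairs[:, 1], 1)
    return counts.tolist()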
Example #17
def connected(noiselist):
	gr = nx.Graph()
	infolist = [trh.meta_info for trh in noiselist] # last in meta info
	infoarray = np.array(infolist)
	#print 'noise info array: ',infoarray # after clustering, no side issue
	layer_column = infoarray[:,4:] # layer number and column number
	lclist = infoarray[:,4:].tolist()
	kdt = KDTree(layer_column)
	pairs = kdt.query_pairs(1.5) #set of tuples of pair indices
	for pair in pairs:
		gr.add_edge(tuple(lclist[pair[0]]),tuple(lclist[pair[1]]))	
		
	gg = (gr.subgraph(c) for c in nx.connected_components(gr))  # connected_component_subgraphs was removed in networkx 2.4
	return gg
Example #18
    def __call__(self, tracks, falarms):
        # Skip if there are one or no tracks. (one because you can't occlude yourself).
        if len(tracks) <= 1:
            return

        # False mergers in this algorithm are only done
        # between tracks.  False alarms are not included.
        trackStrms = Tracks2Cells(tracks)

        frames = np.arange(trackStrms['frameNums'].min(),
                           trackStrms['frameNums'].max() + 1)

        # Go frame by frame to see which storms could be occluded.
        for frameIndex in frames:
            strmCells = trackStrms[trackStrms['frameNums'] == frameIndex]

            # Don't bother if there are only one or no strmCells for this moment in time
            if len(strmCells) <= 1:
                continue

            tree = KDTree(list(zip(strmCells['xLocs'], strmCells['yLocs'])))

            candidatePairs = list(tree.query_pairs(self._false_merge_dist))
            pointsRemoved = set()
            for aPair in candidatePairs:
                if aPair[0] in pointsRemoved or aPair[1] in pointsRemoved:
                    # One of these points has already been removed, skip this pair
                    continue

                strm1Cell = strmCells[aPair[0]]
                strm2Cell = strmCells[aPair[1]]
                strm1TrackID = strm1Cell['trackID']
                strm2TrackID = strm2Cell['trackID']

                if (len(tracks[strm1TrackID]) > 3
                        and len(tracks[strm2TrackID]) > 2 and
                    (np.random.uniform(0.1, 1.0) *
                     np.hypot(strm1Cell['xLocs'] - strm2Cell['xLocs'],
                              strm1Cell['yLocs'] - strm2Cell['yLocs']) /
                     self._false_merge_dist < self._false_merge_prob)):
                    # If the tracks are long enough, and the PRNG determines that a false
                    #   merger should occur, then add this point to the list and then
                    #   rebuild the list of points for the track without the removed point.
                    pointsRemoved.add(aPair[0])
                    tracks[strm1TrackID] = tracks[strm1TrackID][
                        strm1Cell['cornerIDs'] != tracks[strm1TrackID]
                        ['cornerIDs']]
Example #19
    def check_clashes(self, distance=0.77):
        """Check if any atoms are too close to each other. This matters because atoms that are too close can badly distort the results of elastic network models.

        Notes:
            * This function will be moved to the molecule manipulation package later

        Args:
            distance (float): The distance cutoff the user wishes to define as a clash radius (default: 0.77, the maximum bond length)

        Returns:
            clashes (int): Number of clashes present according to the set cutoff.
        """
        from scipy.spatial import KDTree
        all_atoms = [i for i in self.get_atoms()]
        T = KDTree([i.get_location() for i in all_atoms])
        return len(T.query_pairs(distance))
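Outside the class, the same count can be reproduced directly from an N x 3 coordinate array; a sketch with stand-in random coordinates:

import numpy as np
from scipy.spatial import KDTree

coords = np.random.rand(100, 3) * 20.0  # hypothetical atom positions
clashes = len(KDTree(coords).query_pairs(0.77))
print(clashes)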
Example #21
    def find_duplicate_post_on_same_neuron(self,
                                           seg: Chunk,
                                           distance_threshold: float = 10
                                           ) -> set:
        """find duplicate post synapses on the same neuron
        The T-bar could be split to two or three in a long distance

        Args:
            seg (Chunk): neuron segmentation chunk
            distance_threshold (float, optional): distance lower than this threshold is regarded as duplicate. Defaults to 10.
        
        Return:
            duplicate_indices (set[int]): a set of post synapse indices that are detected as duplicates.
        """
        post_coord = self.post_coordinates - np.asarray(seg.bbox.minpt,
                                                        dtype=self.post.dtype)
        kdtree = KDTree(post_coord, leafsize=2)
        pairs = kdtree.query_pairs(distance_threshold,
                                   p=2.0,
                                   eps=0,
                                   output_type='set')

        distances = self.distances_from_pre_to_post

        duplicated_indices = set()

        def find_segid(seg: Chunk, coord: np.ndarray):
            if coord[0] >= seg.shape[0] or coord[1] >= seg.shape[1] or coord[
                    2] >= seg.shape[2]:
                return None
            else:
                return seg[coord[0], coord[1], coord[2]]

        for idx0, idx1 in pairs:
            sid0 = find_segid(seg, post_coord[idx0, :])
            sid1 = find_segid(seg, post_coord[idx1, :])
            if sid0 is not None and sid1 is not None and sid0 == sid1 and sid0 > 0:
                if distances[idx0] > distances[idx1]:
                    duplicated_indices.add(idx0)
                else:
                    duplicated_indices.add(idx1)

        return duplicated_indices
Example #22
def find_edge_atoms(atoms):
    edge_atoms = []
    pos = atoms.get_positions()
    tree = KDTree(pos)
    list_tree = list(tree.query_pairs(1.430))
    bondedTo = [[] for i in range(len(atoms))]

    for bond in list_tree:
        bondedTo[bond[0]].append(bond[1])
        bondedTo[bond[1]].append(bond[0])

    Zs = atoms.get_atomic_numbers()
    # edge carbons are the ones with only two bonds
    for iatom in range(len(atoms)):
        nbonds = len(bondedTo[iatom])
        Z = Zs[iatom]
        if (Z, nbonds) == (6, 2):
            edge_atoms.append(iatom)
    return edge_atoms
Example #23
def close_neighbors(points, distance):

    tri = Delaunay(points)
    kdt = KDTree(points)

    indices, indptr = tri.vertex_neighbor_vertices

    neighbors = {}
    for k in range(len(tri.points)):
        neighbors[k] = indptr[indices[k]:indices[k + 1]]

    pairs = defaultdict(list)
    for a, b in kdt.query_pairs(distance):
        pairs[a] += [b]
        pairs[b] += [a]

    for a in neighbors:
        neighbors[a] = list(set(neighbors[a]) & set(pairs[a]))

    return neighbors
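A usage sketch for close_neighbors (assuming numpy, scipy.spatial.Delaunay, scipy.spatial.KDTree, and collections.defaultdict are imported); the set intersection keeps only Delaunay neighbors that are also within the distance cutoff:

import numpy as np
pts = np.random.rand(30, 2)
nbrs = close_neighbors(pts, distance=0.2)
print(nbrs[0])  # indices of point 0's Delaunay neighbors closer than 0.2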
Example #24
 def get_object_groups(self, conf_map):
     """
     Group objects within certain radius
     :param conf_map:
     :return:
     """
     # get connected components
     im_binary = conf_map >= self.min_th
     im_label = measure.label(im_binary)
     reg_props = measure.regionprops(im_label, conf_map)
     # remove regions that are smaller than threshold
     reg_props = [a for a in reg_props if a.area >= self.min_region]
     # group objects
     centroids = self._reg_to_centroids(reg_props)
     if len(centroids) > 0:
         kdt = KDTree(centroids)
         connect_pair = kdt.query_pairs(self.link_r, eps=self.eps)
         groups = self._group_pairs(connect_pair, reg_props)
         return groups
     else:
         return []
Example #25
def connect_graph_locally(g1, distance_threshold):
    positions = []
    id = 0
    for v in g1.get_vertex_iterator():
        positions.append(np.array(g1.get_position(v)))
        assert (v == id)
        id += 1

    kdtree = KDTree(positions)
    pairs = kdtree.query_pairs(distance_threshold, p=2.0, eps=0)

    for edge in pairs:
        if g1.get_partner(edge[0]) != edge[1]:
            """
            Only connect edges that have not been
            connected before. Can happen in context area.
            """
            try:
                e = g1.add_edge(*edge)
            except AssertionError:
                pass

    return g1
Example #26
def feature_matching(features):
    indices = []
    for i in range(0, len(features)):
        indices += [i] * len(features[i])
    coordinates = []
    for i in range(0, len(features)):
        coordinates += [feature[0] for feature in features[i]]
    orientations = []
    for i in range(0, len(features)):
        orientations += [feature[1] for feature in features[i]]
    descriptions = []
    for i in range(0, len(features)):
        descriptions += [feature[2] for feature in features[i]]
    print(len(descriptions))
    features_list = [[
        indices[i], coordinates[i], orientations[i], descriptions[i]
    ] for i in range(len(orientations))]
    tree = KDTree(data=descriptions, leafsize=50)
    pair_list = list(tree.query_pairs(r=0.3))
    feature_pair_list = [
        ((indices[i1], coordinates[i1]), (indices[i2], coordinates[i2]))
        for (i1, i2) in pair_list if indices[i1] != indices[i2]
    ]
    return feature_pair_list
Example #27
def test_query_pairs_single_node():
    tree = KDTree([[0, 1]])
    assert_equal(tree.query_pairs(0.5), set())
Example #29
 def generate_pairs_if_needed(self, posns):
     self.hits += 1
     if self.hits % self.n == 1:  # implicitly do initialization
         tree = KDTree(posns)
         self.pairs = tree.query_pairs(self.d)
     return self.pairs
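For context, a minimal sketch of the kind of wrapper class this method implies (the class name and constructor are hypothetical); pairs are recomputed only on every n-th call, on the assumption that positions drift slowly between rebuilds:

from scipy.spatial import KDTree

class PairCache:
    def __init__(self, n, d):
        self.n = n          # refresh period in calls (n >= 2, so the first call with hits == 1 triggers a rebuild)
        self.d = d          # pair radius
        self.hits = 0
        self.pairs = set()

    def generate_pairs_if_needed(self, posns):
        self.hits += 1
        if self.hits % self.n == 1:  # implicitly do initialization
            self.pairs = KDTree(posns).query_pairs(self.d)
        return self.pairs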
Example #30
class Tree:
    def __init__(self, parent_map, position, max_root_length):
        self.parent_map = parent_map
        self.center_x, self.center_z = position

        self.max_root_length = max_root_length
        self.max_radius = max_root_length

        self.root = (self.center_x, CORE_HEIGHT, self.center_z)
        self.nodes = [self.root]
        self.genNodes()

        self.genGraph()
        self.genTree()

        self.genBranches()

    def genNodesInChunk(self, chunk_x, chunk_z):

        nodes_in_chunk = set()

        top_nodes, top_height = zip(
            *self.parent_map.getLocalTopNodes(chunk_x, chunk_z))
        bot_nodes, bot_height = zip(
            *self.parent_map.getLocalBotNodes(chunk_x, chunk_z))

        top_tree = KDTree(top_nodes)
        bot_tree = KDTree(bot_nodes)

        def queryNode(x, y, z):
            return bot_height[bot_tree.query(
                (x, z))[1]] < y < 255 - top_height[top_tree.query((x, z))[1]]

        coord_displacement_x = chunk_x * BLOCKS_PER_CHUNK
        coord_displacement_z = chunk_z * BLOCKS_PER_CHUNK

        for coord_displacement_y in range(0, 256, 16):
            for _ in range(coord_displacement_y * NODE_ATTEMPTS_PER_SECTION //
                           128):
                x, y, z = random.randint(0, 15), random.randint(
                    0, 15), random.randint(0, 15)
                y += coord_displacement_y
                if queryNode(x, y, z):
                    nodes_in_chunk.add((coord_displacement_x + x, y,
                                        coord_displacement_z + z))
        self.nodes.extend(nodes_in_chunk)

    def genNodes(self):
        filename = "{mapname}_tree_{x},{z}_nodes".format(
            mapname=self.parent_map.name, x=self.center_x, z=self.center_z)
        if filename in os.listdir(os.getcwd()):
            timer = WaitTimer("Reading nodes for tree at {},{}".format(
                self.center_x, self.center_z))
            with open(filename, "rb") as f:
                self.nodes = rick.load(f)
            self.node_amount = len(self.nodes)
            timer.finish()
            return

        timer = WaitTimer("Generating nodes for tree at {},{}".format(
            self.center_x, self.center_z))
        for chunk_x, chunk_z in Box(
            (self.center_x - self.max_radius) // BLOCKS_PER_CHUNK,
            (self.center_x + self.max_radius) // BLOCKS_PER_CHUNK + 1,
            (self.center_z - self.max_radius) // BLOCKS_PER_CHUNK,
            (self.center_z + self.max_radius) // BLOCKS_PER_CHUNK + 1,
        )():
            chunk_center_x, chunk_center_z = int(
                (chunk_x + 0.5) * BLOCKS_PER_CHUNK), int(
                    (chunk_z + 0.5) * BLOCKS_PER_CHUNK)
            if np.linalg.norm(
                (self.center_x - chunk_center_x,
                 self.center_z - chunk_center_z)) > self.max_radius:
                continue
            self.genNodesInChunk(chunk_x, chunk_z)
        self.node_amount = len(self.nodes)

        with open(filename, "wb") as f:
            rick.dump(self.nodes, f)

        timer.finish()

    def genGraph(self):
        filename = "{mapname}_tree_{x},{z}_edges".format(
            mapname=self.parent_map.name, x=self.center_x, z=self.center_z)
        if filename in os.listdir(os.getcwd()):
            timer = WaitTimer("Reading edges for tree at {},{}".format(
                self.center_x, self.center_z))
            with open(filename, "rb") as f:
                self.graph = rick.load(f)
            timer.finish()
            return

        timer = WaitTimer("Connecting nodes for tree at {},{}".format(
            self.center_x, self.center_z))

        self.graph = [[] for _ in range(self.node_amount)]
        self.kdtree = KDTree(self.nodes)

        bot_height_function = self.parent_map.getTopographIn(
            "bot", self.center_x // BLOCKS_PER_REGION,
            self.center_z // BLOCKS_PER_REGION)
        top_height_function = self.parent_map.getTopographIn(
            "top", self.center_x // BLOCKS_PER_REGION,
            self.center_z // BLOCKS_PER_REGION)

        def query_node(node):
            x, y, z = node
            return bot_height_function(
                x, z) < y < 256 - top_height_function(x, z)

        for first_node, second_node in self.kdtree.query_pairs(
                MAX_SINGLE_BRANCH_SPAN):
            first_point, second_point = np.array(
                self.nodes[first_node]), np.array(self.nodes[second_node])
            if query_node(0.5 * (first_point + second_point)):
                distance = np.linalg.norm(first_point - second_point)
                self.graph[first_node].append((second_node, distance))
                self.graph[second_node].append((first_node, distance))

        root_neighbors = self.kdtree.query(self.root, ROOT_NEIGHBOR_AMOUNT + 1)
        self.graph[0].extend([(root_neighbors[1][index],
                               root_neighbors[0][index])
                              for index in range(1, ROOT_NEIGHBOR_AMOUNT + 1)])

        with open(filename, "wb") as f:
            rick.dump(self.graph, f)

        timer.finish()

    def genTree(self):

        #parent_node could just be a boolean "explored" flag

        timer = WaitTimer("Picking branches for tree at {},{}".format(
            self.center_x, self.center_z))
        parent_node = [None] * self.node_amount
        distance = [None] * len(self.graph)
        self.child_nodes = [[] for _ in range(self.node_amount)]

        parent_node[0] = 0  #self.root
        distance[0] = 0

        #nodes in the pqueue: ((ancestor, distance, node), priority)

        branch_queue = PriorityQueue([((0, weight, neighbor),
                                       DJIKSTRA_KRUSKAL_RATIO * weight)
                                      for neighbor, weight in self.graph[0]])
        while branch_queue:
            parent, edge_weight, current_node = branch_queue.pop()
            if parent_node[current_node] is not None:
                continue  #is in tree
            parent_node[current_node] = parent
            self.child_nodes[parent].append(current_node)
            current_distance = distance[parent] + edge_weight
            distance[current_node] = current_distance
            for neighbor, new_edge_weight in self.graph[current_node]:
                branch_queue.push((current_node, new_edge_weight, neighbor),
                                  current_distance +
                                  DJIKSTRA_KRUSKAL_RATIO * new_edge_weight)
        timer.finish()

    def genBranches(self):
        timer = WaitTimer("Generating branches for tree at {},{}".format(
            self.center_x, self.center_z))
        self.strahler = [-1] * self.node_amount
        self.branches = []
        root_strahler, root_branch = self.genBranches_recursive(0)
        if len(root_branch) > 1:
            self.branches.append(Branch(self, root_branch, root_strahler))
        timer.finish()

    def genBranches_recursive(self, node):
        #readable > efficient
        child_info = [
            self.genBranches_recursive(child)
            for child in self.child_nodes[node]
        ]

        if child_info:
            child_strahler = [
                strahler_num_child
                for strahler_num_child, branch_child in child_info
            ]
            max_child_strahler = max(child_strahler)
            if child_strahler.count(max_child_strahler) > 1:
                strahler_num = max_child_strahler + 1
            else:
                strahler_num = max_child_strahler

            branch = [self.nodes[node]]
            for strahler_num_child, branch_child in child_info:
                if strahler_num_child < strahler_num:
                    self.branches.append(
                        Branch(self, [self.nodes[node]] + branch_child,
                               strahler_num_child))
                else:
                    branch.extend(branch_child)
        else:
            strahler_num, branch = 1, [self.nodes[node]]

        self.strahler[node] = strahler_num
        return strahler_num, branch

    def drawOnMap(self, resolution, min_branch_width=1):
        draw = ImageDraw.Draw(self.parent_map.map)

        pixels_per_region = BLOCKS_PER_REGION // resolution
        coord_displacement = self.parent_map.region_radius * pixels_per_region

        for branch in self.branches:
            if branch.width >= min_branch_width:
                for node1, node2 in zip(branch.nodes, branch.nodes[1:]):
                    p1 = (node1[0] // resolution + coord_displacement,
                          node1[2] // resolution + coord_displacement)
                    p2 = (node2[0] // resolution + coord_displacement,
                          node2[2] // resolution + coord_displacement)
                    draw.line(p1 + p2, fill=255,
                              width=1)  #branch.width+1-min_branch_width)
        del draw

    def genGraph_wnx(self):
        timer = WaitTimer("Connecting nodes for tree at {},{}".format(
            self.center_x, self.center_z))
        #WE DO NOT CHECK whether the nodes we add keep branches from crossing walls,
        #because that should rarely happen anyway, and it is not unreasonable for a branch to cross the ground

        self.graph = nx.Graph()
        self.graph.add_nodes_from(((index, {
            "position": position
        }) for index, position in enumerate(self.nodes)))
        self.kdtree = KDTree(self.nodes)

        self.graph.add_weighted_edges_from(
            ((first_node_index, second_node_index,
              np.linalg.norm(
                  np.array(self.nodes[first_node_index]) -
                  np.array(self.nodes[second_node_index])))
             for first_node_index, second_node_index in
             self.kdtree.query_pairs(MAX_SINGLE_BRANCH_SPAN)))

        root_neighbors = self.kdtree.query(self.root, ROOT_NEIGHBOR_AMOUNT + 1)
        self.graph.add_weighted_edges_from(
            ((0, root_neighbors[1][index], root_neighbors[0][index])
             for index in range(1, ROOT_NEIGHBOR_AMOUNT + 1)))

        timer.finish()

    def genTree_wnx(self):

        timer = WaitTimer("Picking branches for tree at {},{}".format(
            self.center_x, self.center_z))
        parent_node = [None] * self.node_amount
        distance = [None] * len(self.graph)
        self.child_nodes = [[] for _ in range(self.node_amount)]

        parent_node[0] = 0  #self.root
        distance[0] = 0

        #nodes in the pqueue: ((ancestor, distance, node), priority)

        branch_queue = PriorityQueue([
            ((0, self.graph[0][neighbor]["weight"], neighbor),
             DJIKSTRA_KRUSKAL_RATIO * self.graph[0][neighbor]["weight"])
            for neighbor in self.graph[0]
        ])
        while branch_queue:
            parent, edge_weight, current_node = branch_queue.pop()
            if parent_node[current_node] is not None:
                continue  #is in tree
            parent_node[current_node] = parent
            self.child_nodes[parent].append(current_node)
            current_distance = distance[parent] + edge_weight
            distance[current_node] = current_distance
            for neighbor in self.graph[current_node]:
                new_edge_weight = self.graph[current_node][neighbor]["weight"]
                branch_queue.push((current_node, new_edge_weight, neighbor),
                                  current_distance +
                                  DJIKSTRA_KRUSKAL_RATIO * new_edge_weight)
        timer.finish()

    def computeStrahler(self):
        timer = WaitTimer(
            "Computing strahler numbers for tree at {},{}".format(
                self.center_x, self.center_z))
        self.strahler = [-1] * self.node_amount
        self.computeStrahler_recursive(0)
        timer.finish()
        print(self.strahler[0])

    def computeStrahler_recursive(self, node):
        child_strahlers = [
            self.computeStrahler_recursive(child)
            for child in self.child_nodes[node]
        ]
        strahler_num = 0
        for child_strahler_num in child_strahlers:
            if strahler_num == child_strahler_num:
                strahler_num += 1
            strahler_num = max(strahler_num, child_strahler_num)
        self.strahler[node] = strahler_num
        return strahler_num

    def genNodesInChunk_old(self, chunk_x, chunk_z):
        """equiprobable segons altura"""

        top_nodes, top_height = zip(
            *self.parent_map.getLocalTopNodes(chunk_x, chunk_z))
        bot_nodes, bot_height = zip(
            *self.parent_map.getLocalBotNodes(chunk_x, chunk_z))

        top_tree = KDTree(top_nodes)
        bot_tree = KDTree(bot_nodes)

        def queryNode(x, y, z):
            return bot_height[bot_tree.query(
                (x, z))[1]] < y < 255 - top_height[top_tree.query((x, z))[1]]

        coord_displacement_x = chunk_x * BLOCKS_PER_CHUNK
        coord_displacement_z = chunk_z * BLOCKS_PER_CHUNK
        for coord_displacement_y in range(0, 256, 16):
            for _ in range(NODE_ATTEMPTS_PER_SECTION):
                x, y, z = random.randint(0, 15), random.randint(
                    0, 15), random.randint(0, 15)
                y += coord_displacement_y
                if queryNode(x, y, z):
                    self.nodes.append((coord_displacement_x + x, y,
                                       coord_displacement_z + z))
Example #31
import numpy as np
from scipy.spatial import KDTree
from scipy.sparse import csc_matrix
from scipy.sparse.csgraph import connected_components

data = np.loadtxt('input', delimiter=',')
N = data.shape[0]

tree = KDTree(data)
pairs = tree.query_pairs(3, p=1)
rows, cols = list(zip(*pairs))
sparse_graph = csc_matrix((np.ones_like(rows), (rows, cols)), (N, N))

ans = connected_components(sparse_graph, directed=False, return_labels=False)
print(ans)
Example #32
def nitrogenate_all_zig_zag(nx_min, nx_max, nz_min, nz_max, method="am1", optimize_geometry=0, make_symmetric=0, saturate_nitrogens=0):
    all_N_SCF_energy_list, all_N_HOMO_energy_list, all_N_LUMO_energy_list = (np.array([]) for dummy_var in xrange(3))
    pltylabel_list = "SCF Energy", "HOMO Energy", "LUMO Energy"
    sheet_dimensions, sheet_count = ([] for dummy_var in xrange(2))
    for nx in xrange(nx_min, nx_max+1, 2):
        for nz in xrange(nz_min, nz_max+1, 2):

            if make_symmetric == 1:
                atoms = build_sheet(nx, nz, symmetry=1)
                symmetry_folder_string = "_symmetric"
                addition = [0]
                multiplication = []
                edge_carbon_index =[]

                for number in xrange(0, 2*nx):
                    addition.append(number)
                for number in xrange(2, 2*(nx+2), 2):
                    multiplication.append(number)
                    multiplication.append(number)
                for value in xrange(0, len(addition)):
                    edge_carbon_index.append(nz*multiplication[value]+addition[value])
                edge_carbon_index.pop(1)
                edge_carbon_index[:] = [x-len(to_be_removed) for x in edge_carbon_index]

            elif make_symmetric == 0:
                atoms = build_sheet(nx, nz, symmetry=0)
                symmetry_folder_string = "_asymmetric"

            if saturate_nitrogens == 1:
                for edge_carbon in edge_carbon_index:
                    symbols = atoms.get_chemical_symbols()
                    symbols[edge_carbon] = 'N'
                    atoms.set_chemical_symbols(symbols)
                pos = atoms.get_positions()
                tree = KDTree(atoms.get_positions())
                list_tree = list(tree.query_pairs(1.430))
                bondedTo = [[] for i in xrange(len(atoms))]
                for bond in list_tree:
                    bondedTo[bond[0]].append(bond[1])
                    bondedTo[bond[1]].append(bond[0])
                Zs = atoms.get_atomic_numbers()
                for iatom in xrange(len(atoms)):
                    nbonds = len(bondedTo[iatom])
                    Z = Zs[iatom]
                    if (Z,nbonds) == (7,2):
                        r0 = pos[iatom]
                        bond1 = pos[ bondedTo[iatom][0]] - r0
                        bond2 = pos[ bondedTo[iatom][1]]  -r0
                        rH = -(bond1 + bond2)
                        rH = 1.09 * rH / np.linalg.norm(rH)
                        atoms.append(Atom('H',  r0+rH ))
                        daves_super_saturate(atoms)

            elif saturate_nitrogens == 0:
                daves_super_saturate(atoms)

            #view(atoms, viewer="avogadro")
            os.popen("mkdir " + ORCA_filepath + "/all_N_zigzag")
            os.chdir(ORCA_filepath + "/all_N_zigzag")
            if (nx>4 and nx<6) and (nz>4 and nz<6):
                data = make_orca(atoms, filename="nitrogenated_%dx%dgraphene.inp" % (nx, nz), multiplicity="1", method=method, geometry_opt=optimize_geometry, output= ORCA_filepath + "/all_N_zigzag/orca%s_nitrogenated_%dx%dsheet.out" % (symmetry_folder_string, nx, nz), convergence_tolerance="Medium")
            if nx>6:
                data = make_orca(atoms, filename="nitrogenated_%dx%dgraphene.inp" % (nx, nz), multiplicity="1", method=method, geometry_opt=optimize_geometry, output= ORCA_filepath + "/all_N_zigzag/orca%s_nitrogenated_%dx%dsheet.out" % (symmetry_folder_string, nx, nz), convergence_tolerance="Loose")
            else:
                data = make_orca(atoms, filename="nitrogenated_%dx%dgraphene.inp" % (nx, nz), multiplicity="1", method=method, geometry_opt=optimize_geometry, output= ORCA_filepath + "/all_N_zigzag/orca%s_nitrogenated_%dx%dsheet.out" % (symmetry_folder_string, nx, nz))
            moenergies_array = data.moenergies[0]
            all_N_SCF_energy_list = np.append(all_N_SCF_energy_list, int(data.scfenergies))
            all_N_HOMO_energy_list = np.append(all_N_HOMO_energy_list, moenergies_array[data.homos])
            all_N_LUMO_energy_list = np.append(all_N_LUMO_energy_list, moenergies_array[data.homos+1])
            sheet_dimensions.append("%sx%s" % (nx, nz))

            with open("nitrogenated_edge_results.txt", 'a+') as e:
                e.write("\n##########################\n")
                e.write("Nitrogenated %dx%d sheet\n" % (nx, nz))
                e.write("##########################\n")
                e.write("Total SCF energy in eV:\t")
                e.write(str(data.scfenergies))
                e.write("\nMolecular orbital energy of H**O in eV:\t")
                e.write(str(moenergies_array[data.homos]))
                e.write("\nMolecular orbital energy of LUMO in eV:\t")
                e.write(str(moenergies_array[data.homos+1]))
    for dimension in xrange(0, len(sheet_dimensions)):
        sheet_count.append(dimension)
    sheet_count = [x+0.2 for x in sheet_count]
    for y in xrange(0, 3):
        pltylist = all_N_SCF_energy_list, all_N_HOMO_energy_list, all_N_LUMO_energy_list
        rectangles = plt.bar(np.arange(all_N_SCF_energy_list.size), pltylist[y], 0.4, alpha=0.4, color='b')
        plt.title("%s vs Sheet Dimensions" % pltylabel_list[y])
        plt.ylabel(pltylabel_list[y])
        plt.xticks(sheet_count, sheet_dimensions)
        plt.xlabel("Sheet Dimension")
        plt.savefig("%s.png" % pltylabel_list[y])
        plt.clf()
Example #33
def get_RGG_links_and_network(
    N,
    k,
    windowwidth=400,
    linkwidth=1,
    node_scale_by_degree=0.5,
    node_radius_scale=1 / 3,
    pos=None,
):
    """
    Return the links and the stylized network
    of a non-periodic random geometric graph
    on a square.

    Parameters
    ==========
    N : int
        Number of nodes
    k : float
        mean degree
    windowwidth : float, default = 400
        The width of the network visualization
    linkwidth : float, default = 1.0
        All links get the same width.
    node_scale_by_degree : float, default = 0.5
        Scale the node radius by ``degree**node_scale_by_degree``.
        Per default, the node disk area will be
        proportional to the degree. If you want
        all nodes to be equally sized, set
        ``node_scale_by_degree = 0``.
    node_radius_scale : float, default = 1/3
        Factor by which the default node size is scaled.
    pos : numpy.ndarray, default = None
        If ``None``, node positions will be drawn uniform at random.
        If not ``None``, should be position array of shape ``N x 2``.

    Returns
    =======
    edge_weight_tuples : list of tuple
        list of tuples that are structured like ``(source, target, weight)``
    network : dict
        stylized network that can be passed to the visualization
    """

    if pos is None:
        pos = np.random.rand(N, 2)
        ndx = np.argsort(pos[:, 0])
        pos = pos[ndx, :]

    tree = KDTree(pos)

    V = N - 1
    R = np.sqrt(k / V / np.pi)

    pairs = tree.query_pairs(R)
    edge_weight_tuples = []
    for u, v in pairs:
        edge_weight_tuples.append(_edge(int(u), int(v), 1.0))

    w = h = windowwidth
    N_side = int(np.ceil(np.sqrt(N)))
    dx = w / N_side
    radius = dx * node_radius_scale

    stylized_network = {
        'xlim': [0, w],
        'ylim': [0, h],
        'linkAlpha': 0.5,
        'nodeStrokeWidth': 0.0001,
    }

    degree = np.zeros(N, )
    for u, v, _w in edge_weight_tuples:
        degree[u] += 1
        degree[v] += 1

    median_degree = np.median(degree)
    if median_degree == 0:
        median_degree = 1
    radius_scale = (degree / median_degree)**node_scale_by_degree
    radius_scale[radius_scale == 0] = 1.0
    radius = radius_scale * radius

    pos *= w

    nodes = [{
        'id': i,
        'x_canvas': _pos[0],
        'y_canvas': _pos[1],
        'radius': radius[i],
    } for i, _pos in enumerate(pos)]

    nodes = nodes[:N]
    links = [{
        'source': u,
        'target': v,
        'width': linkwidth
    } for u, v, w in edge_weight_tuples]

    stylized_network['nodes'] = nodes
    stylized_network['links'] = links

    return edge_weight_tuples, stylized_network
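The radius choice R = sqrt(k / V / pi) above comes from a one-line expected-degree calculation: ignoring boundary effects, each of the other N - 1 nodes lands inside a given node's disk of area pi * R^2 with probability pi * R^2 on the unit square, so choosing R this way makes the mean degree come out to k:

import numpy as np
N, k = 1000, 6.0
R = np.sqrt(k / (N - 1) / np.pi)
expected_degree = (N - 1) * np.pi * R**2  # equals k by construction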
Example #34
del_spot_rad, del_spot_abs = [], []
resos = []
for f in fnames:
    d = utils.open_flex(f)
    rA = d['residA']
    rB = d['residB']
    beamA = d['beamA']
    beamA.set_wavelength(waveA_default)
    beamB = d['beamB']
    det = d['detector']
    refls = d['refls_data']
    idxA = rA['indexed']
    idxB = rB['indexed']
    hkl = rA['hkl']
    tree = KDTree(hkl)
    pairs = tree.query_pairs(1e-3)
    for i1, i2 in map(list, pairs):
        if not idxA[i1] and not idxA[i2] and not idxB[i1] and not idxB[i2]:
            continue
        elif idxA[i1] and not idxA[i2] and not idxB[i1] and idxB[i2]:
            refA = refls[i1]
            refB = refls[i2]
            iA = i1
            iB = i2
        elif idxA[i2] and not idxA[i1] and not idxB[i2] and idxB[i1]:
            refA = refls[i2]
            refB = refls[i1]
            iA = i2
            iB = i1
        else:
            print(idxA[[i1, i2]], idxB[[i1, i2]])
Example #35
def _bus(stops='data/beijing_geo/wgs_bus_stops.geojson', lines='data/beijing_geo/wgs_bus_lines.geojson'):
    bus_speed = 18 * 1000 / 60  # 18 km/h, in meters per minute

    stops, lines = gpd.read_file(stops), gpd.read_file(lines)
    assert stops.crs == lines.crs
    crs = stops.crs

    data = pd.merge(stops, lines, left_on='line_id', right_on='id', suffixes=('_stop', '_line'))
    del stops, lines

    data = data.drop(columns=['id_stop', 'line_id', 'id_line', 'type', 'bounds'])
    data = data.rename(columns={
        'name_stop': 'stop_name',
        'sequence': 'stop_seq',
        'geometry_stop': 'stop_geom',
        'name_line': 'line_name',
        'start_stop': 'start_stop',
        'end_stop': 'end_stop',
        'distance': 'line_dist',
        'geometry_line': 'line_geom'
    })

    nodes = list(set(data.apply(
        lambda it: GeneralNode('bus', (it.stop_geom.x, it.stop_geom.y), it.stop_name, it.line_name), axis=1
    )))
    lines = data.groupby('line_name').agg({
        'stop_name': list,
        'stop_seq': list,
        'stop_geom': list,
        'start_stop': 'first',
        'end_stop': 'first',
        'line_dist': 'first',
        'line_geom': 'first'
    })
    lines['stops'] = lines.apply(
        lambda line: [GeneralNode('bus', (_geom.x, _geom.y), _name, line.name) for _name, _, _geom in
                      sorted(zip(line.stop_name, line.stop_seq, line.stop_geom), key=lambda it: int(it[1]))], axis=1
    )
    lines = lines.drop(columns=['stop_name', 'stop_seq', 'stop_geom'])

    net = nx.DiGraph()
    # add stops as graph nodes
    net.add_nodes_from(nodes)

    # add non-transfer edges
    for name, info in tqdm(lines.iterrows()):
        # 2 yuan within 10 km (inclusive).
        # Beyond 10 km, each additional 1 yuan covers another 5 km.
        stops, geom = info.stops, info.line_geom
        for w in range(1, len(stops)):
            for i in range(len(stops) - w):
                j = i + w
                srt, end = stops[i], stops[j]
                if w == 1:
                    dist = get_real_distance(Point(srt.point), Point(end.point), geom, crs=crs)
                else:
                    itr = stops[i + 1]
                    dist = net.get_edge_data(srt, itr).get('distance') + net.get_edge_data(itr, end).get('distance')
                net.add_edge(srt, end, **{
                    'distance': dist,
                    'time': dist / bus_speed,
                    'price': math.ceil((dist - 10) / 5) + 2,
                    'transfer_time': 0,
                    'n_stations': w,
                    'line': name,
                    'plan': stops[i: j + 1]
                })
    # add bus transfer edges
    coords = list(map(lambda it: (it.x, it.y), map(lambda it: transform(Point(it.point), source_cs=crs), nodes)))
    kdtree = KDTree(coords)
    for i, j in tqdm(kdtree.query_pairs(500)):  # any pair of nodes within 500 meters
        ix, iy = coords[i]
        jx, jy = coords[j]
        dist = ((ix - jx) ** 2 + (iy - jy) ** 2) ** .5
        net.add_edge(nodes[i], nodes[j], **{
            'distance': dist,
            'time': 5,
            'price': 0,
            'transfer_time': 1,
            'plan': [nodes[i].line, nodes[j].line]
        })

    return net
Example #36
    def update(value):
        for c in plot_circles:
            c.remove()
        plot_circles.clear()
        value = int(value)
        start = time_index[max(0, value - history)]
        end = time_index[value]
        title.set_text('from {} to {}'.format(start.strftime('%H:%M (%d/%m/%Y)'), \
            end.strftime('%H:%M (%d/%m/%Y)')))

        filtered_df = df_10min.loc[(start <= df_10min.index)
                                   & (df_10min.index <= end)]
        for vessel_id, group_by_vessel_df in filtered_df.groupby('vessel_id'):
            coords = [bmap(x, y) for x, y in zip(group_by_vessel_df['longitude'], \
                group_by_vessel_df['latitude']) if not np.isnan(x) and not np.isnan(y)]

            (x, y) = zip(*coords) if coords else ([], [])
            plot_vessel[vessel_id].set_xdata(x)
            plot_vessel[vessel_id].set_ydata(y)
            if display_ids:
                plot_vessel_id[vessel_id].set_position(
                    (x[-1] if x else -1e6, y[-1] if y else -1e6))
        # find sts
        neighbor_trace = None
        if value >= neighbor_duration:
            for time in time_index[value - neighbor_duration:value + 1]:
                df_at_time = filtered_df.loc[time]
                data = np.array([(x, y) for x, y in zip(df_at_time['longitude'], \
                    df_at_time['latitude']) if not np.isnan(x) and not np.isnan(y)])

                id_mapping = [i for i, x, y in zip(df_at_time['vessel_id'], \
                    df_at_time['longitude'], df_at_time['latitude']) \
                    if not np.isnan(x) and not np.isnan(y)]

                if len(data) >= 2:
                    T = KDTree(data)
                    pairs = T.query_pairs(min_dist)
                    pairs = set(
                        (id_mapping[i], id_mapping[j]) for i, j in pairs)
                    neighbor_trace = pairs if neighbor_trace is None \
                        else neighbor_trace.intersection(pairs)

        if neighbor_trace is None:
            return
        meta_init = filtered_df.loc[time_index[value - neighbor_duration]]
        meta_end = filtered_df.loc[time_index[value]]
        print('\n########## From {} to {}'.format(time_index[value - neighbor_duration] \
            .strftime('%H:%M (%d/%m/%Y)'), end.strftime('%H:%M (%d/%m/%Y)')))

        for id1, id2 in neighbor_trace:
            id1_data_init = meta_init.loc[meta_init['vessel_id'] ==
                                          id1].iloc[0]
            id1_data_end = meta_end.loc[meta_end['vessel_id'] == id1].iloc[0]
            id2_data_init = meta_init.loc[meta_init['vessel_id'] ==
                                          id2].iloc[0]
            id2_data_end = meta_end.loc[meta_end['vessel_id'] == id2].iloc[0]
            print('Detected STS {} - {}'.format(id1_data_end['vessel_id'], \
                id2_data_end['vessel_id']))

            print('    [{}] draught: {:.1f} --> {:.1f}    status: {} --> {}'.format( \
                id1_data_end['vessel_id'], id1_data_init['draught'], \
                id1_data_end['draught'], id1_data_init['new_navigational_status'], \
                id1_data_end['new_navigational_status']))

            print('    [{}] draught: {:.1f} --> {:.1f}    status: {} --> {}'.format( \
                id2_data_end['vessel_id'], id2_data_init['draught'], \
                id2_data_end['draught'], id2_data_init['new_navigational_status'], \
                id2_data_end['new_navigational_status']))

            x1, y1 = id1_data_end['longitude'], id1_data_end['latitude']
            circle_size, _ = bmap(x1 + min_dist, y1)
            x1, y1 = bmap(x1, y1)
            circle_size = abs(x1 - circle_size)
            x2, y2 = id2_data_end['longitude'], id2_data_end['latitude']
            x2, y2 = bmap(x2, y2)
            x = 0.5 * (x1 + x2)
            y = 0.5 * (y1 + y2)
            circle = plt.Circle((x, y), 3 * circle_size, color='r', alpha=0.5)
            plot_circles.add(circle)
            ax.add_artist(circle)
            circle = plt.Circle((x, y), 30 * circle_size, color='r', alpha=0.1)
            plot_circles.add(circle)
            ax.add_artist(circle)
Example #37
def main():
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')

    parser.add_argument('-config_filename', default='@example.cfg')
    parser.add_argument('-real_data',
                        '--real_data',
                        help='If true, the provided data is ground truth data',
                        action='store_false')
    parser.add_argument('-data_inputfilename', default='default')
    parser.add_argument('-gt_inputfilename', default='default')
    parser.add_argument('--quadratic', action='store_true')
    parser.add_argument(
        '-voxel_size',
        type=float,
        default=[1.0, 1.0, 1.0],
        nargs='+',
    )
    # Preprocessing parameters
    parser.add_argument('-preprocessing_threshold', type=float, default=0.7)
    # Chunk parameters
    parser.add_argument('-chunk_size', nargs='+', default=False, type=int)
    parser.add_argument('-chunk_overlap',
                        nargs='+',
                        default=[100, 100, 100],
                        type=int)
    parser.add_argument('-chunkbox_start', nargs='+', default=False, type=int)
    parser.add_argument('-chunkbox_end', nargs='+', default=False, type=int)
    parser.add_argument('--from_gt_bb', action='store_true')
    # ILP Parameters
    parser.add_argument(
        '-dummy_edge_cost',
        type=float,
        default=100.0,
        help='ILP hyperparameter: theta_S in the paper. This '
        'parameter can be used to tune the overall number of microtubule '
        'trajectories. The higher the value, the fewer trajectories are generated.')
    parser.add_argument(
        '-distance_cost',
        type=float,
        default=1.0,
        help=
        'ILP hyperparameter: theta_D in the paper, cost for the edge distance. If this value is '
        'high, candidates that are far apart are less likely to be linked.')
    parser.add_argument(
        '-comb_angle_cost',
        type=float,
        default=5.0,
        help=
        'ILP hyperparameter: theta_C in the paper, cost for rigidity. If this value is high, '
        'more rigid structures are generated.')
    parser.add_argument('-perc_of_noise', type=float, default=0.0)
    parser.add_argument(
        '-angle_cost_factor',
        type=float,
        default=0.0,
        help=
        'ILP hyperparameter: theta_a in the paper, cost for the discrepancy of estimated '
        'direction vector of a candidate compared to the angle generated by the potential link.'
    )
    parser.add_argument('-tree_query_distance', type=float, default=150.0)
    parser.add_argument(
        '-selection_cost',
        type=float,
        default=-50.0,
        help=
        'ILP hyperparameter: theta_V in the paper, cost for the selection of a candidate.'
    )
    parser.add_argument('-pairwise', type=bool, default=True)
    parser.add_argument('--exclusive_distance', action='store_true')
    parser.add_argument('-exclusive_distance_threshold',
                        type=float,
                        default=25.0)
    parser.add_argument('-timelimit', type=float, default=1000.)
    # parser.add_argument('-simulation_parameter', nargs='+',
    #                     help='If ground truth data is used, this is the percentage of noise added and '
    #                          'the number of microtubules.', default=[0.1, 14], type=float)
    parser.add_argument('--verbose', '-v', action='store_true')
    parser.add_argument('--visualize', action='store_true')
    parser.add_argument('-outputfolder', default='')
    parser.add_argument('-outputdirectory', default='')
    parser.add_argument('--horizontal', action='store_true')
    parser.add_argument('--merge_connected_nodes', action='store_true')
    # Postprocessing parameters
    parser.add_argument('-mask_seg_inputfilename', default='default')

    results = parser.parse_args()
    voxel_size = np.array(results.voxel_size)
    correction_factor = 1. / voxel_size
    # correction_factor = np.array([1/4.6, 1/4.6, 1/50.])
    additive = np.array([0, 0, 0])
    if results.verbose:
        print "config settings"
        print results

    # Parameters
    visualize = results.visualize
    params = PGM.ILPParameters(
        dummy_edge_cost=results.dummy_edge_cost,
        distance_cost=results.distance_cost,
        comb_angle_cost=results.comb_angle_cost,
        angle_cost_factor=results.angle_cost_factor,
        selection_cost=results.selection_cost,
        pairwise=results.pairwise,
        exclusive_distance=results.exclusive_distance,
        exclusive_distance_threshold=results.exclusive_distance_threshold)
    # Chunk parameters
    chunk_size = np.array(results.chunk_size)
    chunk_overlap = np.array(results.chunk_overlap)

    # Distance to build up the graphs
    tree_query_distance = results.tree_query_distance

    if results.gt_inputfilename != 'default':
        filename = results.gt_inputfilename
        skeletons = knossos_utils.from_nml_to_nx_skeletons(filename,
                                                           scaling=1 /
                                                           correction_factor)
        skeletons = networkx_utils.make_nx_graphs_unique(skeletons)
        all_skeletons = nx.compose_all(skeletons)
        all_skeletons = networkx_utils.NxSkeleton(all_skeletons)
        all_skeletons.add_geom_features_to_edges()
        all_skeletons.add_geom_features_to_nodes()
        all_skeletons.add_coords_to_node_id_dic()
        data = networkx_utils.from_nx_skeleton_to_datapoints(skeletons)
        gt_data = data.copy()

    filename = results.data_inputfilename
    print "preparing input data"
    extracted_skeletons = knossos_utils.from_nml_to_nx_skeletons(
        filename, scaling=1 / correction_factor)
    extracted_skeletons = networkx_utils.stitch_nx_graphs_together_based_on_same_coord(
        extracted_skeletons)

    data = networkx_utils.from_nx_skeleton_to_datapoints(
        [extracted_skeletons.nx_graph])
    extracted_skeletons.add_coords_to_node_id_dic()
    print "input data prepared"

    if not results.chunkbox_start:
        bb_low = np.min(data[:, 0:3], axis=0)
    else:
        bb_low = np.array(results.chunkbox_start)
    if not results.chunkbox_end:
        bb_upper = np.max(data[:, 0:3], axis=0)
    else:
        bb_upper = np.array(results.chunkbox_end)

    if results.from_gt_bb:
        try:
            bb_low = np.min(gt_data[:, 0:3], axis=0)
            bb_upper = np.max(gt_data[:, 0:3], axis=0)
        except UnboundLocalError:
            print "please provide a ground truth filename or select other bounding box option"

    if not results.chunk_size:
        chunk_size = bb_upper - bb_low

    if results.verbose:
        print "Bounding box of data", bb_low, bb_upper
    # Set the chunk information
    windows = chunks.Chunks(bb_low,
                            bb_upper - bb_low,
                            chunk_size,
                            chunk_overlap,
                            verbose=results.verbose)
    windows.data_to_chunks(data[:, 0:3])
    # outputbase = os.path.join(results.outputdirectory, 'ILP', results.outputfolder,
    #                           eo.get_daystamp_folder_name(), eo.get_timestamp_folder_name())
    outputbase = os.path.join(results.outputdirectory, 'ILP_results',
                              results.outputfolder)
    if not os.path.exists(outputbase):
        os.makedirs(outputbase)
    outputdirectory = os.path.join(outputbase, 'knossos', '')
    if not os.path.exists(outputdirectory):
        os.makedirs(outputdirectory)
    whole_nx_graph_list = []
    for (window_count, window) in enumerate(windows.chunk_list):
        if results.verbose:
            print "processing chunk %i of %i" % (window_count,
                                                 len(windows.chunk_list))
        data_in_window = data[window.indeces]
        # Test whether there are enough points (> 4) in the window
        if data_in_window.shape[0] > 4:
            print "%i datapoints in current chunk" % data_in_window.shape[0]
            tree = KDTree(
                zip(data_in_window[:, 0].ravel(), data_in_window[:, 1].ravel(),
                    data_in_window[:, 2].ravel()))
            id_to_pos = {}

            for ii in range(tree.data.shape[0]):
                id_to_pos[ii] = tree.data[ii, :]

            # Generate the edge list with the kd-tree (only pairs closer than
            # tree_query_distance become edges)
            distance_graphs = tree.query_pairs(tree_query_distance)

            # Not all data points might be connected (and the ILP cannot see
            # this), so split the graph into connected components and solve them separately
            # List of nx_skeletons
            nx_skeletons_all = networkx_utils.from_dtps_to_nx_skeletons(
                tree.data, distance_graphs)

            print "len of edgelist", len(distance_graphs)
            print "There are %i independent clusters in the dataset" % len(
                nx_skeletons_all)
            nx_skeletons = []
            count = 0
            for nx_skeleton in nx_skeletons_all:
                num_of_nodes = nx_skeleton.nx_graph.number_of_nodes()
                if num_of_nodes > 3:
                    nx_skeletons.append(nx_skeleton)
                else:
                    count += 1
            print "%i clusters have been filtered out" % count

            new_skeletons = []
            model_status_list = []
            for ii in range(len(nx_skeletons)):
                # for ii in range(3):
                g = nx_skeletons[ii]
                g.add_geom_features_to_edges()
                g.add_geom_features_to_nodes()
                g.print_statistics()
                # Add Ground truth direction vectors

                # g = add_ground_truth_direction_vector(g, extracted_skeletons)
                new_graph, model = PGM.calculate_ILP_linear(
                    g, params, timelimit=results.timelimit)
                new_skeletons.append(new_graph)
                model_status_list.append((model.status, model.MIPGap))

            skeleton_list = []
            for new_graph in new_skeletons:
                graph_for_evaluation = new_graph.nx_graph.copy()
                whole_nx_graph_list.append(graph_for_evaluation)
                new_graph.scale_positions(correction_factor, additive)
                skeleton_list.append(new_graph.nx_graph)

            print "length of skeletonlist", len(skeleton_list)

            # outputfilename = outputdirectory + "/fitted_models_chunk%i.nml" % window_count
            # knossos_utils.from_nx_graphs_to_knossos(skeleton_list, outputfilename)
        else:
            print "not enough datapoints in window %i" % window_count
            print data_in_window.shape

        data_in_window = data_in_window * correction_factor + additive
        # knossos_utils.datapoints_to_knossos(data_in_window,
        #                                     outputdirectory + "/orig_datapoints_chunk%i.nml" % window_count)

    unstitched_nx_skeleton = networkx_utils.stitch_nx_graphs_together(
        whole_nx_graph_list)
    if len(windows.chunk_list) > 1:
        stitched_nx_skeleton = networkx_utils.stitch_nx_graphs_together_based_on_same_coord(
            whole_nx_graph_list)
    else:
        stitched_nx_skeleton = networkx_utils.stitch_nx_graphs_together(
            whole_nx_graph_list)
    if visualize:
        stitched_nx_skeleton.visualize()

    if not results.mask_seg_inputfilename == 'default':
        # Mask results
        f = h5py.File(results.mask_seg_inputfilename, 'r')
        data_mask = f['seg'].value
        f.close()
        stitched_nx_skeleton.scale_positions(correction_factor, additive)
        networkx_utils.crop_nx_graph_with_mask(stitched_nx_skeleton.nx_graph,
                                               data_mask)
    else:
        stitched_nx_skeleton.scale_positions(correction_factor, additive)
    outputfilename = os.path.join(outputdirectory,
                                  'fitted_models_all_chunks.nml')
    # knossos_utils.from_nx_graphs_to_knossos([stitched_nx_skeleton.nx_graph], outputfilename)

    unstitched_nx_skeleton.scale_positions(correction_factor, additive)
    resultfilename = os.path.join(outputdirectory,
                                  'fitted_models_all_chunks_not_stitched.nml')
    # knossos_utils.from_nx_graphs_to_knossos([unstitched_nx_skeleton.nx_graph], resultfilename)

    input_graph = networkx_utils.stitch_nx_graphs_together(
        [nx_skeleton.nx_graph for nx_skeleton in nx_skeletons_all])

    input_graph.scale_positions(correction_factor, additive)
    inputgraphfilename = os.path.join(outputdirectory, 'input_graph.nml')
    knossos_utils.from_nx_graphs_to_knossos([input_graph.nx_graph],
                                            inputgraphfilename)

    for min_num_nodes in range(3):
        splitted_nx_graphs = networkx_utils.split_nx_graphs(
            [stitched_nx_skeleton.nx_graph], min_number_of_nodes=min_num_nodes)
        outputfilename = os.path.join(
            outputdirectory,
            'fitted_models_all_chunks_splitted_min%i.nml' % min_num_nodes)

        knossos_utils.from_nx_graphs_to_knossos(splitted_nx_graphs,
                                                outputfilename)

    outputfilename = os.path.join(outputdirectory, "input_datapoints.nml")
    extracted_skeletons.scale_positions(correction_factor, additive=additive)
    knossos_utils.from_nx_graphs_to_knossos([extracted_skeletons.nx_graph],
                                            outputfilename)
    if not results.gt_inputfilename == 'default':
        all_skeletons.scale_positions(correction_factor, additive)
        outputfilename = outputdirectory + "/GT.nml"
        knossos_utils.from_nx_graphs_to_knossos([all_skeletons.nx_graph],
                                                outputfilename)
    # Write results to file
    arguments_outputfile = outputbase + '/arguments.h5'
    terminal_commands = vars(results)
    for key, value in terminal_commands.iteritems():
        utils.write_data_to_h5(arguments_outputfile,
                               value,
                               key,
                               overwrite=True,
                               verbose=False)

    results_outputfile = outputbase + '/results.h5'
    utils.write_data_to_h5(results_outputfile,
                           resultfilename,
                           'resultfilename',
                           overwrite=True,
                           verbose=False)
    utils.write_data_to_h5(results_outputfile,
                           stitched_nx_skeleton.get_number_of_cc(),
                           'number_of_skeletons_result',
                           overwrite=True,
                           verbose=False)

    utils.write_data_to_h5(results_outputfile,
                           model_status_list,
                           'status_report',
                           overwrite=True,
                           verbose=False)
    if results.verbose:
        print 'final knossos skeleton is stored in', outputdirectory + 'fitted_models_all_chunks_splitted_min2.nml'
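
Inside each chunk, main() turns the query_pairs output into a graph and solves the ILP separately on every connected component. A reduced sketch of that clustering step using plain networkx (the helper name is mine; the real code delegates to networkx_utils.from_dtps_to_nx_skeletons):

import networkx as nx
from scipy.spatial import KDTree

def split_into_clusters(points, radius):
    # Every query_pairs hit becomes an edge; each connected component is an
    # independent cluster that can be handed to the ILP on its own.
    tree = KDTree(points)
    g = nx.Graph()
    g.add_nodes_from(range(len(points)))
    g.add_edges_from(tree.query_pairs(radius))
    return [g.subgraph(c).copy() for c in nx.connected_components(g)]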

Example #38
def get_bond_list(geom,
                  atoms=None,
                  threshold=4,
                  min_neighbors=4,
                  snapshots=30,
                  bond_threshold=1.8,
                  enforce=()):
    """Get the list of all the important atom pairs.
    Samples a number of snapshots from a list of geometries to generate all
    distances that are below a given threshold in any of them.

    Args:
        geom:       One geometry, or a list of geometries, to check for pairs.
        atoms:      Symbol for each atom.
        threshold:  Distance threshold for including a pair in the bond list.
        min_neighbors: Minimum number of neighbors to include for each atom.
                    If an atom has fewer than this number of bonds, additional
                    distances will be added to reach this number.
        snapshots:  Number of snapshots to be used in the generation, useful
                    for speeding up the process if the path is long and
                    atoms numerous.
        bond_threshold: Distance below which two atoms count as bonded; the
                    neighborhoods of bonded atoms are expanded into extra pairs.
        enforce:    Pairs that are always included, regardless of distance.

    Returns:
        List of all the included interatomic distance pairs, and the reference
        distance for each pair (sum of atomic radii, or 2.0 when no symbols
        are given).
    """
    # Type casting and value checks on input parameters
    geom = np.asarray(geom)
    if len(geom.shape) < 3:
        # If there is only one geometry or it is flattened, promote to 3d
        geom = geom.reshape(1, -1, 3)
    min_neighbors = min(min_neighbors, geom.shape[1] - 1)

    # Determine which images to be used to determine distances
    snapshots = min(len(geom), snapshots)
    images = [0, len(geom) - 1]
    if snapshots > 2:
        images.extend(
            np.random.choice(range(1, len(geom) - 1),
                             snapshots - 2,
                             replace=False))
    # Get neighbor list for included geometry and merge them
    rijset = set(enforce)
    for image in images:
        tree = KDTree(geom[image])
        pairs = tree.query_pairs(threshold)
        rijset.update(pairs)
        bonded = tree.query_pairs(bond_threshold)
        neighbors = {i: {i} for i in range(geom.shape[1])}
        for i, j in bonded:
            neighbors[i].add(j)
            neighbors[j].add(i)
        for i, j in bonded:
            for ni in neighbors[i]:
                for nj in neighbors[j]:
                    if ni != nj:
                        pair = tuple(sorted([ni, nj]))
                        if pair not in rijset:
                            rijset.add(pair)
    rijlist = sorted(rijset)
    # Check neighbor count to make sure `min_neighbors` is satisfied
    count = np.zeros(geom.shape[1], dtype=int)
    for i, j in rijlist:
        count[i] += 1
        count[j] += 1
    for idx, ct in enumerate(count):
        if ct < min_neighbors:
            _, neighbors = tree.query(geom[-1, idx], k=min_neighbors + 1)
            for i in neighbors:
                if i == idx:
                    continue
                pair = tuple(sorted([i, idx]))
                if pair in rijset:
                    continue
                else:
                    rijset.add(pair)
                    rijlist.append(pair)
                    count[i] += 1
                    count[idx] += 1
    if atoms is None:
        re = np.full(len(rijlist), 2.0)
    else:
        radius = np.array(
            [ATOMIC_RADIUS.get(atom.capitalize(), 1.5) for atom in atoms])
        re = np.array([radius[i] + radius[j] for i, j in rijlist])
    logger.debug("Pair list contains %d pairs", len(rijlist))
    return rijlist, re
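
A minimal usage sketch for get_bond_list; the water-like coordinates and the expected output are illustrative assumptions, and ATOMIC_RADIUS is taken from the surrounding module:

import numpy as np

geom = np.array([[0.00, 0.00, 0.00],    # O
                 [0.96, 0.00, 0.00],    # H
                 [-0.24, 0.93, 0.00]])  # H
rijlist, re = get_bond_list(geom, atoms=['O', 'H', 'H'], min_neighbors=2)
print(rijlist)  # expected: [(0, 1), (0, 2), (1, 2)]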
Example #39
 def ariadne_run(self):
     inputs = self.input()
     cg_tgt = inputs.next()
     synapse_tgts = list(inputs)
     #
     # The connectivity graph for mapping neurite IDs
     #
     with cg_tgt.open("r") as fd:
         cg = ConnectivityGraph.load(fd)
     neuron_1 = []
     neuron_2 = []
     score = []
     synapse_center_x = []
     synapse_center_y = []
     synapse_center_z = []
     neuron_1_center_x = []
     neuron_1_center_y = []
     neuron_1_center_z = []
     neuron_2_center_x = []
     neuron_2_center_y = []
     neuron_2_center_z = []
     volumes = []
     volume_idx = []
     for idx, synapse_tgt in enumerate(synapse_tgts):
         with synapse_tgt.open("r") as fd:
             synapse_dict = json.load(fd)
         volume = Volume(**synapse_dict["volume"])
         volumes.append(volume)
         if len(synapse_dict["neuron_1"]) == 0:
             rh_logger.logger.report_event(
                 "No synapses found in volume, %d, %d, %d" % 
                 (volume.x, volume.y, volume.z))
             continue
         n1 = cg.convert(np.array(synapse_dict["neuron_1"]), volume)
         n2 = cg.convert(np.array(synapse_dict["neuron_2"]), volume)
         sx = np.array(synapse_dict["synapse_centers"]["x"])+volume.x
         sy = np.array(synapse_dict["synapse_centers"]["y"])+volume.y
         sz = np.array(synapse_dict["synapse_centers"]["z"])+volume.z
         n1x = np.array(synapse_dict["neuron_1_centers"]["x"])+volume.x
         n1y = np.array(synapse_dict["neuron_1_centers"]["y"])+volume.y
         n1z = np.array(synapse_dict["neuron_1_centers"]["z"])+volume.z
         n2x = np.array(synapse_dict["neuron_2_centers"]["x"])+volume.x
         n2y = np.array(synapse_dict["neuron_2_centers"]["y"])+volume.y
         n2z = np.array(synapse_dict["neuron_2_centers"]["z"])+volume.z
         neuron_1.append(n1)
         neuron_2.append(n2)
         score.append(np.array(synapse_dict["score"]))
         synapse_center_x.append(sx)
         synapse_center_y.append(sy)
         synapse_center_z.append(sz)
         neuron_1_center_x.append(n1x)
         neuron_1_center_y.append(n1y)
         neuron_1_center_z.append(n1z)
         neuron_2_center_x.append(n2x)
         neuron_2_center_y.append(n2y)
         neuron_2_center_z.append(n2z)
         volume_idx.append([idx] * len(n1))
     volume_idx, neuron_1, neuron_2, score, \
         synapse_center_x, synapse_center_y, synapse_center_z, \
         neuron_1_center_x, neuron_1_center_y, neuron_1_center_z, \
         neuron_2_center_x, neuron_2_center_y, neuron_2_center_z = \
         map(np.hstack, [
             volume_idx, neuron_1, neuron_2, score, 
             synapse_center_x, synapse_center_y, synapse_center_z,
             neuron_1_center_x, neuron_1_center_y, neuron_1_center_z,
             neuron_2_center_x, neuron_2_center_y, neuron_2_center_z])
     #
     # We pick the synapse farthest from the edge when eliminating.
     # The following code computes the distance to the edge
     #
     vx0, vx1, vy0, vy1, vz0, vz1 = [
         np.array([getattr(volume, _) for volume in volumes])
         for _ in "x", "x1", "y", "y1", "z", "z1"]
     sx, sy, vx0, vx1, vy0, vy1 = \
         [_ * self.xy_nm for _ in 
          synapse_center_x, synapse_center_y, vx0, vx1, vy0, vy1]
     sz, vz0, vz1 = \
         [np.array(_) * self.z_nm for _ in 
                  synapse_center_z, vz0, vz1]
     volume_idx = np.array(volume_idx)
     dx = np.minimum(sx - vx0[volume_idx], vx1[volume_idx] - sx)
     dy = np.minimum(sy - vy0[volume_idx], vy1[volume_idx] - sy)
     dz = np.minimum(sz - vz0[volume_idx], vz1[volume_idx] - sz)
     d_edge = np.sqrt(dx * dx + dy * dy + dz * dz)
     #
     # Create a KDTree, converting coordinates to nm and get pairs
     # closer than the allowed minimum inter-synapse distance.
     #
     t0 = time.time()
     kdtree = KDTree(np.column_stack((
         np.array(synapse_center_x) * self.xy_nm,
         np.array(synapse_center_y) * self.xy_nm,
         np.array(synapse_center_z) * self.z_nm)))
     rh_logger.logger.report_metric(
          "AggregateSynapseConnectionsTask.KDTreeBuildTime", 
          time.time() - t0)
     t0 = time.time()
     pairs = np.array(list(kdtree.query_pairs(self.min_distance_nm)))
     rh_logger.logger.report_metric(
             "AggregateSynapseConnectionsTask.KDTreeQueryPairsTime", 
              time.time() - t0)
     #
     # Eliminate the duplicates.
     #
     if len(pairs) > 0:
         d_pair = np.sqrt(
             (sx[pairs[:, 0]] - sx[pairs[:, 1]]) ** 2 + 
             (sy[pairs[:, 0]] - sy[pairs[:, 1]]) ** 2 + 
             (sz[pairs[:, 0]] - sz[pairs[:, 1]]) ** 2)
         #
         # Use the edge distance if within min_distance_identical_nm,
         # otherwise, use the synapse score.
         #
         use_edge = d_pair <= self.min_distance_identical_nm
         
         first_is_best = \
             ((d_edge[pairs[:, 0]] > d_edge[pairs[:, 1]]) & use_edge) | \
             ((score[pairs[:, 0]] > score[pairs[:, 1]]) & ~ use_edge)
         to_remove = np.unique(np.hstack(
              [pairs[first_is_best, 1], pairs[~ first_is_best, 0]]))
         neuron_1, neuron_2, score, \
             synapse_center_x, synapse_center_y, synapse_center_z, \
             neuron_1_center_x, neuron_1_center_y, neuron_1_center_z, \
             neuron_2_center_x, neuron_2_center_y, neuron_2_center_z = \
             [np.delete(_, to_remove) for _ in 
              neuron_1, neuron_2, score,
              synapse_center_x, synapse_center_y, synapse_center_z,
              neuron_1_center_x, neuron_1_center_y, neuron_1_center_z,
              neuron_2_center_x, neuron_2_center_y, neuron_2_center_z]
     #
     # Make the dictionaries.
     #
     neuron_1, neuron_2, score, \
         synapse_center_x, synapse_center_y, synapse_center_z, \
         neuron_1_center_x, neuron_1_center_y, neuron_1_center_z, \
         neuron_2_center_x, neuron_2_center_y, neuron_2_center_z = [
             _.tolist() for _ in
             neuron_1, neuron_2, score, 
             synapse_center_x, synapse_center_y, synapse_center_z,
             neuron_1_center_x, neuron_1_center_y, neuron_1_center_z,
             neuron_2_center_x, neuron_2_center_y, neuron_2_center_z]
     result = dict(
         neuron_1=neuron_1,
         neuron_2=neuron_2,
         synapse_center=dict(
             x=synapse_center_x,
             y=synapse_center_y,
             z=synapse_center_z),
         neuron_1_center=dict(
             x=neuron_1_center_x,
             y=neuron_1_center_y,
             z=neuron_1_center_z),
         neuron_2_center=dict(
             x=neuron_2_center_x,
             y=neuron_2_center_y,
             z=neuron_2_center_z))
     with self.output().open("w") as fd:
         json.dump(result, fd)
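
The elimination step above is a reusable pattern: query all index pairs closer than the minimum distance, keep the better member of each pair, and delete the losers in a single pass. A reduced sketch that keeps only the score tie-break (the edge-distance rule for near-identical detections is dropped; the names are mine):

import numpy as np
from scipy.spatial import KDTree

def keep_best(centers_nm, score, min_distance_nm):
    # Return the indices of detections that survive de-duplication.
    pairs = np.array(list(KDTree(centers_nm).query_pairs(min_distance_nm)))
    keep = np.arange(len(centers_nm))
    if len(pairs) == 0:
        return keep
    first_is_best = score[pairs[:, 0]] > score[pairs[:, 1]]
    to_remove = np.unique(np.hstack(
        [pairs[first_is_best, 1], pairs[~first_is_best, 0]]))
    return np.delete(keep, to_remove)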