def edmondsKarp(graph, s, t, dim):
    flow = 0
    source, sink = s, t
    temp1, predecessor = breadth_first_order(csr_matrix(graph), 0, directed=True,
                                             return_predecessors=True)
    sPath = shortestPath(predecessor, source, sink)
    while source in sPath:
        minCap = findMinEdgeCap(graph, sPath)
        flow = flow + minCap
        graph = augmentPath(graph, sPath, minCap)
        temp2, predecessor = breadth_first_order(csr_matrix(graph), 0, directed=True,
                                                 return_predecessors=True)
        # store escape route to print
        escapeRoute = " "
        for i in range(1, len(sPath) - 1, 2):
            for x in range(1, dim + 1):
                for y in range(1, dim + 1):
                    if ((x - 1) * 2 * dim + (2 * (y - 1) + 1)) == sPath[i]:
                        escapeRoute = escapeRoute + "(" + str(x) + "," + str(y) + ")" + " -> "
                        break
        if escapeRoute != " ":
            answer.append(escapeRoute[:-4])
        sPath = shortestPath(predecessor, source, sink)
    return flow
def _compute_reachability(self, dependencies: Dict[str, Dict[str, Slot]]) -> None:
    if self.reachability:
        return
    input_modules = self.get_inputs()
    k = list(dependencies.keys())
    index = dict(zip(k, range(len(k))))
    graph = self._dependency_csgraph(dependencies, index)
    self.reachability = {}
    reachability = {
        inp: set(breadth_first_order(graph, index[inp], return_predecessors=False))
        for inp in input_modules
    }
    for vis in self.get_visualizations():
        vis_index = index[vis]
        vis_reachability = set(
            breadth_first_order(graph.T, vis_index, return_predecessors=False))
        for inp in input_modules:
            inp_reachability = reachability[inp]
            if vis_index in inp_reachability:
                inter = vis_reachability.intersection(inp_reachability)
                inter = {k[i] for i in inter}
                if inp in self.reachability:
                    self.reachability[inp].update(inter)
                else:
                    self.reachability[inp] = inter
def capacity_scale(G, s, t):
    """
    :param G: Network Graph
    :param s: Source
    :param t: Sink
    :return: Maximum flow through the network
    """
    flow = 0
    source, sink = s, t
    delta = 2 ** len(bin(np.amax(G))[2:])
    while delta >= 1:
        G_delta = scale_graph(G, delta)
        nodes, predecessor = breadth_first_order(csr_matrix(G_delta), 0, directed=True,
                                                 return_predecessors=True)
        shortest_path = path(predecessor, source, sink)
        while len(shortest_path) > 1:
            bottleneck_edge = find_bottleneck(G_delta, shortest_path)
            flow += bottleneck_edge
            G = augment(G, shortest_path, bottleneck_edge)
            G_delta = scale_graph(G, delta)
            nodes, predecessor = breadth_first_order(csr_matrix(G_delta), 0, directed=True,
                                                     return_predecessors=True)
            shortest_path = path(predecessor, source, sink)
        delta /= 2
    return flow
def edmonds_karp(G, s, t):
    """
    :param G: Network Graph
    :param s: Source
    :param t: Sink
    :return: Maximum flow through the network
    """
    flow = 0
    source, sink = s, t
    nodes, predecessor = breadth_first_order(csr_matrix(G), 0, directed=True,
                                             return_predecessors=True)
    shortest_path = path(predecessor, source, sink)
    # print shortest_path
    while len(shortest_path) > 1:
        bottleneck_edge = find_bottleneck(G, shortest_path)
        flow += bottleneck_edge
        G = augment(G, shortest_path, bottleneck_edge)
        nodes, predecessor = breadth_first_order(csr_matrix(G), 0, directed=True,
                                                 return_predecessors=True)
        shortest_path = path(predecessor, source, sink)
    return flow
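# Note: the two max-flow routines above rely on an external helper, `path`, that turns
# the predecessor array returned by breadth_first_order into an explicit source-to-sink
# node list. That helper is not shown here; the following is a minimal, hypothetical
# sketch of how such predecessor backtracking could look (names are illustrative and
# not taken from the original code).
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import breadth_first_order


def bfs_path(predecessor, source, sink):
    """Backtrack from sink to source through the BFS predecessor array."""
    node_path = [sink]
    node = sink
    while node != source:
        node = predecessor[node]
        if node < 0:  # -9999 marks "no predecessor" in scipy's predecessor array
            return []  # sink cannot be reached from source
        node_path.append(node)
    return node_path[::-1]


if __name__ == "__main__":
    # tiny capacity matrix: 0 -> 1 -> 3 and 0 -> 2 -> 3
    capacities = np.array([[0, 3, 2, 0],
                           [0, 0, 0, 3],
                           [0, 0, 0, 2],
                           [0, 0, 0, 0]])
    _, pred = breadth_first_order(csr_matrix(capacities), 0, directed=True,
                                  return_predecessors=True)
    print(bfs_path(pred, 0, 3))  # e.g. [0, 1, 3]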
def has_connection(graph, A, B):
    r"""Check if the given graph contains a path connecting A and B.

    Parameters
    ----------
    graph : scipy.sparse matrix
        Adjacency matrix of the graph
    A : array_like
        The set of starting states
    B : array_like
        The set of end states

    Returns
    -------
    hc : bool
        True if the graph contains a path connecting A and B, otherwise False.

    """
    for istart in A:
        nodes = csgraph.breadth_first_order(graph, istart, directed=True,
                                            return_predecessors=False)
        if has_path(nodes, A, B):
            return True
    return False
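# `has_path` above is an external helper from the same project; the set-to-set
# connectivity test it performs can be sketched standalone with scipy only.
# This is an illustrative re-implementation, not the original helper.
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import breadth_first_order

adj = csr_matrix(np.array([[0, 1, 0, 0],
                           [0, 0, 1, 0],
                           [0, 0, 0, 0],
                           [0, 0, 1, 0]]))  # edges: 0->1, 1->2, 3->2


def sets_connected(graph, A, B):
    """True if some state in A reaches some state in B."""
    targets = set(B)
    for istart in A:
        reached = breadth_first_order(graph, istart, directed=True,
                                      return_predecessors=False)
        if targets.intersection(reached.tolist()):
            return True
    return False


print(sets_connected(adj, [0], [2]))  # True:  0 -> 1 -> 2
print(sets_connected(adj, [2], [0]))  # False: no edges leave node 2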
def find_mst_bridge(self):
    if not hasattr(self, 'distMat'):
        self.get_all_bridge()
    # MST bridges with breadth_first_order
    distMatMst = csg.minimum_spanning_tree(self.distMat)
    succs, preds = csg.breadth_first_order(distMatMst, i_start=self.labelRef - 1,
                                           directed=False)
    # save to self.bridges
    self.bridges = []
    for i in range(1, succs.size):
        n0 = preds[succs[i]] + 1
        n1 = succs[i] + 1
        # read conn
        nn = sorted([str(n0), str(n1)])
        conn = self.connDict['{}{}'.format(nn[0], nn[1])]
        y0, x0 = conn[str(n0)]
        y1, x1 = conn[str(n1)]
        # save bdg
        bridge = dict()
        bridge['x0'] = x0
        bridge['y0'] = y0
        bridge['x1'] = x1
        bridge['y1'] = y1
        bridge['label0'] = n0
        bridge['label1'] = n1
        self.bridges.append(bridge)
    self.num_bridge = len(self.bridges)
    return
def multiple_path(self, start, points):
    points = points_order(start, points)
    paths = []
    for i in range(len(points)):
        end = points[i]
        sx, sy = start
        ex, ey = end
        scx = math.floor(sx / self.cell_size)
        scy = math.floor(sy / self.cell_size)
        s_encoding = scx * self.grid_size + scy
        ecx = math.floor(ex / self.cell_size)
        ecy = math.floor(ey / self.cell_size)
        e_encoding = ecx * self.grid_size + ecy
        D, Pr = breadth_first_order(self.m, s_encoding, directed=False)
        p = []
        if Pr[e_encoding] < 0:
            print('no path exists for ', i)
            paths.append(-1)
            return paths
        p.append([ecx, ecy])
        encoding = e_encoding
        while Pr[encoding] >= 0:
            predecessor = Pr[encoding]
            x = predecessor // self.grid_size
            y = predecessor % self.grid_size
            p = [[x, y]] + p
            encoding = predecessor
        paths.append(p)
        start = end
    return paths
def find_mst_bridge(self):
    if not hasattr(self, 'distMat'):
        self.get_all_connections()
    # MST bridges with breadth_first_order
    distMatMst = csg.minimum_spanning_tree(self.distMat)
    succs, preds = csg.breadth_first_order(distMatMst, i_start=self.labelRef - 1,
                                           directed=False)
    # save to self.bridges
    self.bridges = []
    for i in range(1, succs.size):
        n0 = preds[succs[i]] + 1
        n1 = succs[i] + 1
        # read conn
        nn = sorted([str(n0), str(n1)])
        conn = self.connDict['{}{}'.format(nn[0], nn[1])]
        y0, x0 = conn[str(n0)]
        y1, x1 = conn[str(n1)]
        # save bdg
        bridge = dict()
        bridge['x0'] = x0
        bridge['y0'] = y0
        bridge['x1'] = x1
        bridge['y1'] = y1
        bridge['label0'] = n0
        bridge['label1'] = n1
        self.bridges.append(bridge)
    self.num_bridge = len(self.bridges)
    return
def _calculate_cluster_speeds(self):
    # Find the cluster with the highest number of intersections
    most_intersections = 0
    root_cluster = None
    for cluster in self.sim.clusters:
        intersections = 0
        for segment in cluster.tour.objects:
            for other_cluster in self.sim.clusters:
                if other_cluster == cluster:
                    continue
                if segment in other_cluster.tour.objects:
                    intersections += 1
        if intersections > most_intersections:
            most_intersections = intersections
            root_cluster = cluster

    nodes, preds = sp.breadth_first_order(
        self.cluster_graph, self.sim.clusters.index(root_cluster),
        return_predecessors=True)

    for node in nodes:
        parent_idx = preds[node]
        child_idx = node
        if parent_idx == -9999:
            self.sim.clusters[child_idx].mdc_speed = self.env.mdc_speed
        else:
            self._set_child_speed(self.sim.clusters[parent_idx],
                                  self.sim.clusters[child_idx])
def descendants(self, node, sort=False):
    descendants = csgraph.breadth_first_order(
        self, i_start=node, directed=True, return_predecessors=False)[1:]
    if not sort:
        return descendants
    return sorted(descendants)
def checker(path_to_input, path_to_output):
    true_output = {}
    user_output = {}
    with open(path_to_input, 'r') as f:
        start, finish = map(int, f.readline().split())
        start -= 1
        finish -= 1
        n, m, t = map(int, f.readline().split())
        input_path = [[0 for _ in range(n)] for _ in range(n)]
        for _ in range(m):
            i, j = map(int, f.readline().split())
            i -= 1
            j -= 1
            input_path[i][j] = 1
    with open(path_to_output, 'r') as f:
        output_len = int(f.readline())
        user_output['n'] = output_len
    matrix = csr_matrix(input_path)
    true_out, aa = breadth_first_order(matrix, start)
    true_out = true_out.tolist()
    true_output['n'] = len(backtrace(aa, start, finish)) - 1
    return user_output, true_output, true_output['n'] == user_output['n']
def _compute_period(self):
    """
    Set ``self._period`` and ``self._cyclic_components_proj``.

    Use the algorithm described in:
    J. P. Jarvis and D. R. Shier,
    "Graph-Theoretic Analysis of Finite Markov Chains," 1996.

    """
    # Degenerate graph with a single node (which is strongly connected)
    # csgraph.reconstruct_path would raise an exception
    # github.com/scipy/scipy/issues/4018
    if self.n == 1:
        if self.csgraph[0, 0] == 0:  # No edge: "trivial graph"
            self._period = 1  # Any universally accepted definition?
            self._cyclic_components_proj = np.zeros(self.n, dtype=int)
            return None
        else:  # Self loop
            self._period = 1
            self._cyclic_components_proj = np.zeros(self.n, dtype=int)
            return None

    if not self.is_strongly_connected:
        raise NotImplementedError(
            'Not defined for a non strongly-connected digraph'
        )

    if np.any(self.csgraph.diagonal() > 0):
        self._period = 1
        self._cyclic_components_proj = np.zeros(self.n, dtype=int)
        return None

    # Construct a breadth-first search tree rooted at 0
    node_order, predecessors = \
        csgraph.breadth_first_order(self.csgraph, i_start=0)
    bfs_tree_csr = \
        csgraph.reconstruct_path(self.csgraph, predecessors)

    # Edges not belonging to tree_csr
    non_bfs_tree_csr = self.csgraph - bfs_tree_csr
    non_bfs_tree_csr.eliminate_zeros()

    # Distance to 0
    level = np.zeros(self.n, dtype=int)
    for i in range(1, self.n):
        level[node_order[i]] = level[predecessors[node_order[i]]] + 1

    # Determine the period
    d = 0
    for node_from, node_to in _csr_matrix_indices(non_bfs_tree_csr):
        value = level[node_from] - level[node_to] + 1
        d = gcd(d, value)
        if d == 1:
            self._period = 1
            self._cyclic_components_proj = np.zeros(self.n, dtype=int)
            return None

    self._period = d
    self._cyclic_components_proj = level % d
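# The period computation above (Jarvis & Shier) can be illustrated in isolation:
# build a BFS tree, take each node's level (distance from the root), and gcd over
# (level[u] - level[v] + 1) for every non-tree edge (u, v). The toy 3-cycle below is
# only a hand-made demonstration of that idea, not part of the original class.
import numpy as np
from math import gcd
from scipy import sparse
from scipy.sparse import csgraph

A = sparse.csr_matrix(np.array([[0, 1, 0],
                                [0, 0, 1],
                                [1, 0, 0]]))  # directed 3-cycle: 0 -> 1 -> 2 -> 0

node_order, predecessors = csgraph.breadth_first_order(A, i_start=0)
bfs_tree = csgraph.reconstruct_path(A, predecessors)
non_tree = A - bfs_tree
non_tree.eliminate_zeros()

level = np.zeros(A.shape[0], dtype=int)
for i in range(1, A.shape[0]):
    level[node_order[i]] = level[predecessors[node_order[i]]] + 1

d = 0
rows, cols = non_tree.nonzero()
for u, v in zip(rows, cols):
    d = gcd(d, level[u] - level[v] + 1)

print(d)  # 3: the period of a directed 3-cycle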
def ancestors(self, node, sort=False):
    ancestors = csgraph.breadth_first_order(
        self.T, i_start=node, directed=True, return_predecessors=False)[1:]
    if not sort:
        return ancestors
    return sorted(ancestors)
def _best_subset(self, n_qubits):
    """Computes the qubit mapping with the best connectivity.

    Args:
        n_qubits (int): Number of subset qubits to consider.

    Returns:
        ndarray: Array of qubits to use for best connectivity mapping.
    """
    if n_qubits == 1:
        return np.array([0])

    device_qubits = self.coupling_map.size()

    cmap = np.asarray(self.coupling_map.get_edges())
    data = np.ones_like(cmap[:, 0])
    sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),
                            shape=(device_qubits, device_qubits)).tocsr()
    best = 0
    best_map = None
    # do bfs with each node as starting point
    for k in range(sp_cmap.shape[0]):
        bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,
                                     return_predecessors=False)

        connection_count = 0
        sub_graph = []
        for i in range(n_qubits):
            node_idx = bfs[i]
            for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[node_idx + 1]):
                node = sp_cmap.indices[j]
                for counter in range(n_qubits):
                    if node == bfs[counter]:
                        connection_count += 1
                        sub_graph.append([node_idx, node])
                        break

        if connection_count > best:
            best = connection_count
            best_map = bfs[0:n_qubits]

    # Return a best mapping that has reduced bandwidth
    mapping = {}
    for edge in range(best_map.shape[0]):
        mapping[best_map[edge]] = edge
    new_cmap = [[mapping[c[0]], mapping[c[1]]] for c in sub_graph]
    rows = [edge[0] for edge in new_cmap]
    cols = [edge[1] for edge in new_cmap]
    data = [1] * len(rows)
    sp_sub_graph = sp.coo_matrix((data, (rows, cols)),
                                 shape=(n_qubits, n_qubits)).tocsr()
    perm = cs.reverse_cuthill_mckee(sp_sub_graph)
    best_map = best_map[perm]
    return best_map
def _breadth_first_order(G):
    inds = np.arange(G.num_vertices())
    adj = G.matrix(dense=True, csr=True)
    total_order = []
    while len(inds) > 0:
        order = ssc.breadth_first_order(adj, np.random.choice(inds),
                                        return_predecessors=False)
        inds = np.setdiff1d(inds, order, assume_unique=True)
        total_order = np.append(total_order, order)
    return total_order.astype(int)
def longest_path_in_tree(tree, start_from=0):
    '''
    Finds the longest *undirected* path in a tree using two searches.

    Algorithm:
    1. Run BFS from an arbitrary node (`start_from`). Call the furthest node F1.
    2. Starting from F1, run another BFS. Find the furthest node F2.
    3. Return the path between F1 and F2.

    Parameters
    ----------
    tree : sparse matrix
        Input matrix. Must represent a valid tree.
    start_from : int
        Node to start from. This is useful for cases where the graph
        *contains* a tree, but may contain stranded nodes (or other connected
        components). Specifying the start node allows you to determine what
        connected component the algorithm finds the longest path in.
        (It still assumes that that connected component is a valid tree.)

    Returns
    -------
    path : ndarray
        The ordered list of nodes traversed in the longest path, including
        the start/end nodes.
    '''
    furthest_node_1 = breadth_first_order(tree, start_from, directed=False,
                                          return_predecessors=False)[-1]
    search_result, predecessors = breadth_first_order(tree, furthest_node_1,
                                                      directed=False)
    furthest_node_2 = search_result[-1]
    path = reconstruct_predecessor_path(predecessors, furthest_node_1,
                                        furthest_node_2)
    # Because furthest_node_1 is furthest from 0, a path found from it will
    # often go from a much higher number to 0. While this is technically valid,
    # it's often more aesthetically appealing to have the path go the other
    # direction, so we reverse what we get from tracing predecessors.
    return path[::-1]
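# The double-BFS trick documented above can be checked on a tiny tree with scipy alone.
# This standalone sketch deliberately does not call longest_path_in_tree, because its
# reconstruct_predecessor_path helper lives elsewhere in that project; it only verifies
# that two BFS passes land on the endpoints of a longest path.
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import breadth_first_order

# small tree: 0-1, 1-2, 1-3, 3-4 (a longest path is 2-1-3-4)
edges = [(0, 1), (1, 2), (1, 3), (3, 4)]
rows, cols = zip(*edges)
tree = csr_matrix((np.ones(len(edges)), (rows, cols)), shape=(5, 5))

f1 = breadth_first_order(tree, 0, directed=False, return_predecessors=False)[-1]
order, preds = breadth_first_order(tree, f1, directed=False)
f2 = order[-1]
print(f1, f2)  # endpoints of a longest path, e.g. 4 and 2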
def _breadth_first_order(G):
    inds = np.arange(G.num_vertices())
    adj = G.matrix('dense', 'csr')
    total_order = []
    while len(inds) > 0:
        order = ssc.breadth_first_order(adj, np.random.choice(inds),
                                        return_predecessors=False)
        inds = np.setdiff1d(inds, order, assume_unique=True)
        total_order = np.append(total_order, order)
    return total_order.astype(int)
def _best_subset(backend, n_qubits):
    """Computes the qubit mapping with the best connectivity.

    Parameters:
        backend (Qiskit.BaseBackend): A QISKit backend instance.
        n_qubits (int): Number of subset qubits to consider.

    Returns:
        ndarray: Array of qubits to use for best connectivity mapping.

    Raises:
        QISKitError: Wrong number of qubits given.
    """
    if n_qubits == 1:
        return np.array([0])
    elif n_qubits <= 0:
        raise QISKitError('Number of qubits <= 0.')

    device_qubits = backend.configuration()['n_qubits']
    if n_qubits > device_qubits:
        raise QISKitError('Number of qubits greater than device.')

    cmap = np.asarray(backend.configuration()['coupling_map'])
    data = np.ones_like(cmap[:, 0])
    sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),
                            shape=(device_qubits, device_qubits)).tocsr()
    best = 0
    best_map = None
    # do bfs with each node as starting point
    for k in range(sp_cmap.shape[0]):
        bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,
                                     return_predecessors=False)

        connection_count = 0
        for i in range(n_qubits):
            node_idx = bfs[i]
            for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[node_idx + 1]):
                node = sp_cmap.indices[j]
                for counter in range(n_qubits):
                    if node == bfs[counter]:
                        connection_count += 1
                        break

        if connection_count > best:
            best = connection_count
            best_map = bfs[0:n_qubits]
    return best_map
def perform_connectivity_search(net, node_pit, slack_nodes, from_nodes, to_nodes,
                                active_node_lookup, active_branch_lookup,
                                mode="hydraulics"):
    len_nodes = len(node_pit)
    nobranch = np.sum(active_branch_lookup)
    active_from_nodes = from_nodes[active_branch_lookup]
    active_to_nodes = to_nodes[active_branch_lookup]

    # we create a "virtual" node that is connected to all slack nodes and start the
    # connectivity search at this node
    fn_matrix = np.concatenate([active_from_nodes, slack_nodes])
    tn_matrix = np.concatenate([active_to_nodes,
                                np.full(len(slack_nodes), len_nodes, dtype=np.int32)])

    adj_matrix = coo_matrix((np.ones(nobranch + len(slack_nodes)), (fn_matrix, tn_matrix)),
                            shape=(len_nodes + 1, len_nodes + 1))

    # check which nodes are reachable from the virtual heat slack node
    reachable_nodes = csgraph.breadth_first_order(adj_matrix, len_nodes, False, False)
    # throw out the virtual heat slack node
    reachable_nodes = reachable_nodes[reachable_nodes != len_nodes]

    # note: np.bool is deprecated in recent numpy versions, the builtin bool is used here
    nodes_connected = np.zeros(len(active_node_lookup), dtype=bool)
    nodes_connected[reachable_nodes] = True

    if not np.all(nodes_connected[active_from_nodes] == nodes_connected[active_to_nodes]):
        raise ValueError(
            "An error occurred in the %s connectivity check. Please contact the pandapipes "
            "development team!" % mode)
    branches_connected = active_branch_lookup & nodes_connected[from_nodes]

    oos_nodes = np.where(~nodes_connected & active_node_lookup)[0]
    is_nodes = np.where(nodes_connected & ~active_node_lookup)[0]

    if len(oos_nodes) > 0:
        msg = "\n".join("In table %s: %s" % (tbl, nds) for tbl, nds in
                        get_table_index_list(net, node_pit, oos_nodes))
        logger.info("Setting the following nodes out of service for %s calculation in "
                    "connectivity check:\n%s" % (mode, msg))

    if len(is_nodes) > 0:
        node_type_message = "\n".join("In table %s: %s" % (tbl, nds) for tbl, nds in
                                      get_table_index_list(net, node_pit, is_nodes))
        if get_net_option(net, "quit_on_inconsistency_connectivity"):
            raise UserWarning(
                "The following nodes are connected to in_service branches in the %s "
                "calculation although being out of service, which leads to an inconsistency "
                "in the connectivity check!\n%s" % (mode, node_type_message))
        logger.info("Setting the following nodes back in service for %s calculation in "
                    "connectivity check as they are connected to in_service branches:\n%s"
                    % (mode, node_type_message))

    return nodes_connected, branches_connected
def find_furthest_pt(skel, root, single=True):
    num_nodes = len(skel.vertices)
    edges = skel.edges

    g = sp.coo_matrix((np.ones(len(edges),), (edges[:, 0], edges[:, 1])),
                      shape=(num_nodes, num_nodes))
    o = csgraph.breadth_first_order(g, root, directed=False,
                                    return_predecessors=False)
    furthest_node = o[-1]

    o2, preds = csgraph.breadth_first_order(g, furthest_node, directed=False,
                                            return_predecessors=True)
    path_inds = reconstruct_all_paths(preds)
    paths = [inds for inds in path_inds if root in inds]

    if single:
        # note: the original assert referenced an undefined name `path`
        assert len(paths) == 1, "Too many paths"
        return furthest_node, paths[0]
    else:
        return furthest_node, paths
def best_subset(backend, n_qubits):
    """Computes the qubit mapping with the best connectivity.

    Parameters:
        backend (Qiskit.BaseBackend): A QISKit backend instance.
        n_qubits (int): Number of subset qubits to consider.

    Returns:
        ndarray: Array of qubits to use for best connectivity mapping.

    Raises:
        QISKitError: Wrong number of qubits given.
    """
    if n_qubits == 1:
        return np.array([0])
    elif n_qubits <= 0:
        raise QISKitError('Number of qubits <= 0.')

    device_qubits = backend.configuration['n_qubits']
    if n_qubits > device_qubits:
        raise QISKitError('Number of qubits greater than device.')

    cmap = np.asarray(backend.configuration['coupling_map'])
    data = np.ones_like(cmap[:, 0])
    sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),
                            shape=(device_qubits, device_qubits)).tocsr()
    best = 0
    best_map = None
    # do bfs with each node as starting point
    for k in range(sp_cmap.shape[0]):
        bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,
                                     return_predecessors=False)

        connection_count = 0
        for i in range(n_qubits):
            node_idx = bfs[i]
            for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[node_idx + 1]):
                node = sp_cmap.indices[j]
                for counter in range(n_qubits):
                    if node == bfs[counter]:
                        connection_count += 1
                        break

        if connection_count > best:
            best = connection_count
            best_map = bfs[0:n_qubits]
    return best_map
def _arrange_atoms_in_molecules(molecules: list):
    """
    Check if the atoms in a Molecule object belong to a single molecule. This is done to
    prevent errors from openbabel or the CSD Python API when assigning residue indices.
    The check is performed by converting molecules to graphs and looking at their edges
    with the Breadth First Search algorithm.

    :param molecules: List of Molecule objects
    :return:
    """
    from scipy.sparse import csr_matrix
    from scipy.sparse.csgraph import breadth_first_order

    new_molecules = list()
    molidx = 0
    for molecule in molecules:
        graph = csr_matrix(molecule.contact_matrix)
        removed = []
        for atom in range(len(molecule._atoms)):
            if atom in removed:
                continue
            bfs = breadth_first_order(graph, atom, False, False)
            removed = removed + list(bfs)

            new_molecule = Molecule(molecule._residue)
            new_molecule._index = molidx
            molidx += 1
            new_molecule._atoms = [molecule._atoms[i]
                                   for i in range(len(molecule._atoms)) if i in bfs]
            new_molecule._natoms = len(new_molecule._atoms)
            for natom in new_molecule._atoms:
                natom._index = natom._index - (new_molecule._natoms * new_molecule._index)
                natom._bonds = [bond - (new_molecule._natoms * new_molecule._index)
                                for bond in natom._bonds]
            new_molecule._calculate_centroid()
            new_molecule._forcefield = molecule._forcefield
            new_molecule._potential_energy = molecule._potential_energy
            new_molecule._generate_contact_matrix()
            new_molecules.append(new_molecule)
    return new_molecules
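# The component-splitting idea used in _arrange_atoms_in_molecules can be tried in
# isolation on a toy contact matrix. Everything below is an illustrative sketch that
# only groups atom indices into connected fragments; it is independent of the Molecule
# class used above.
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import breadth_first_order

# 5 atoms, two disconnected fragments: {0, 1, 2} and {3, 4}
contact_matrix = np.array([[0, 1, 1, 0, 0],
                           [1, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1],
                           [0, 0, 0, 1, 0]])
graph = csr_matrix(contact_matrix)

fragments = []
seen = set()
for atom in range(contact_matrix.shape[0]):
    if atom in seen:
        continue
    bfs = breadth_first_order(graph, atom, False, False)  # directed=False, no predecessors
    seen.update(bfs.tolist())
    fragments.append(sorted(bfs.tolist()))

print(fragments)  # [[0, 1, 2], [3, 4]]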
def find_mst_bridge(self):
    """Search for bridges to connect all labeled areas using the minimum spanning tree
    algorithm

    Returns:
        bridges : list of dict, i.e.:
            [{'label0': 1, 'label1': 3, 'x0': 345, 'x1': 191, 'y0': 1232, 'y1': 1089},
             {'label0': 1, 'label1': 4, 'x0': 1143, 'x1': 1157, 'y0': 1204, 'y1': 1217},
             {'label0': 1, 'label1': 5, 'x0': 557, 'x1': 565, 'y0': 1263, 'y1': 1270},
             {'label0': 3, 'label1': 2, 'x0': 249, 'x1': 239, 'y0': 891, 'y1': 868}]
    """
    if not hasattr(self, 'distMat'):
        self.get_all_bridge()
    # MST bridges with breadth_first_order
    distMatMst = csg.minimum_spanning_tree(self.distMat)
    succs, preds = csg.breadth_first_order(distMatMst, i_start=self.labelRef - 1,
                                           directed=False)
    # save to self.bridges
    self.bridges = []
    for i in range(1, succs.size):
        n0 = preds[succs[i]] + 1
        n1 = succs[i] + 1
        # read conn
        if n0 > n1:
            nn = [str(n1), str(n0)]
        else:
            nn = [str(n0), str(n1)]
        conn = self.connDict['{}_{}'.format(nn[0], nn[1])]
        y0, x0 = conn[str(n0)]
        y1, x1 = conn[str(n1)]
        # save bdg
        bridge = dict()
        bridge['x0'] = x0
        bridge['y0'] = y0
        bridge['x1'] = x1
        bridge['y1'] = y1
        bridge['label0'] = n0
        bridge['label1'] = n1
        bridge['distance'] = ((x1 - x0) ** 2 + (y1 - y0) ** 2) ** 0.5
        self.bridges.append(bridge)
    self.num_bridge = len(self.bridges)
    return self.bridges
def _best_subset(self, n_qubits):
    """Computes the qubit mapping with the best connectivity.

    Args:
        n_qubits (int): Number of subset qubits to consider.

    Returns:
        ndarray: Array of qubits to use for best connectivity mapping.
    """
    if n_qubits == 1:
        return np.array([0])

    device_qubits = self.coupling_map.size()

    cmap = np.asarray(self.coupling_map.get_edges())
    data = np.ones_like(cmap[:, 0])
    sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),
                            shape=(device_qubits, device_qubits)).tocsr()
    best = 0
    best_map = None
    # do bfs with each node as starting point
    for k in range(sp_cmap.shape[0]):
        bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,
                                     return_predecessors=False)

        connection_count = 0
        for i in range(n_qubits):
            node_idx = bfs[i]
            for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[node_idx + 1]):
                node = sp_cmap.indices[j]
                for counter in range(n_qubits):
                    if node == bfs[counter]:
                        connection_count += 1
                        break

        if connection_count > best:
            best = connection_count
            best_map = bfs[0:n_qubits]
    return best_map
def breadth_first_search(self, graph_dense, s_i, s_j, t_i, t_j):
    """Performs a shortest-path query and returns the shortest path between
    some source cell and target cell expressed in costmap coordinates.
    This method is targeted for a single query.

        graph_dense : dense graph representation of the costmap
    """
    source_id = self.graph_id(s_i, s_j)
    target_id = self.graph_id(t_i, t_j)
    graph_sparse = csgraph.csgraph_from_dense(graph_dense)
    nodes, predecessors = csgraph.breadth_first_order(
        graph_sparse, source_id, directed=False, return_predecessors=True)
    path = []
    path.append((t_i, t_j))
    while True:
        target_id = predecessors[target_id]
        path.append(self.costmap_id(target_id))
        if source_id == target_id:
            break
    return path
def unwrap(mol, anchor_atom_index=0):
    '''Unwraps molecule in periodic cell. Makes minimum spanning tree of the
    molecule space continuous.

    Parameters:

    mol: Atoms object
        Input periodic model of interest.
    anchor_atom_index: index of the Atom that will be used as an anchor for
        the rest of molecule. Position of this atom will not be changed.

    Returns:

    Unwrapped molecule
    '''
    assert 0 <= anchor_atom_index < len(mol)
    V = mol.get_all_distances(True, True)
    D = np.linalg.norm(V, axis=-1)
    st = minimum_spanning_tree(D).toarray()
    order, preds = breadth_first_order(st, anchor_atom_index, directed=False,
                                       return_predecessors=True)
    for i in order[1:]:
        mol.positions[i] = mol.positions[preds[i]] + V[preds[i], i, :]
    return mol
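# A hypothetical usage sketch for unwrap, assuming ASE is installed and that
# minimum_spanning_tree / breadth_first_order are imported from scipy.sparse.csgraph
# as the snippet above expects. The water-like geometry below is made up purely for
# illustration and is not taken from the original project.
import numpy as np
from ase import Atoms
from scipy.sparse.csgraph import minimum_spanning_tree, breadth_first_order

# a small molecule deliberately "wrapped" across the cell boundary
mol = Atoms('OHH',
            positions=[[0.1, 0.0, 0.0],
                       [9.9, 0.6, 0.0],      # this H sits on the far side of the box
                       [0.1, -0.6, 0.8]],
            cell=[10.0, 10.0, 10.0],
            pbc=True)

unwrapped = unwrap(mol, anchor_atom_index=0)
print(unwrapped.positions)  # the wrapped H is moved next to the O along the MST edge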
def path(self, start, end):
    sx, sy = start
    ex, ey = end
    scx = math.floor(sx / self.cell_size)
    scy = math.floor(sy / self.cell_size)
    s_encoding = scx * self.grid_size + scy
    ecx = math.floor(ex / self.cell_size)
    ecy = math.floor(ey / self.cell_size)
    e_encoding = ecx * self.grid_size + ecy
    D, Pr = breadth_first_order(self.m, s_encoding, directed=False)
    p = []
    if Pr[e_encoding] < 0:
        print('no path exists')
        return -1
    p.append([ecx, ecy])
    encoding = e_encoding
    while Pr[encoding] >= 0:
        predecessor = Pr[encoding]
        x = predecessor // self.grid_size
        y = predecessor % self.grid_size
        p = [[x, y]] + p
        encoding = predecessor
    return p
def cluster_data_volume(self, cluster_id, intercluster_only=False):
    """

    :param cluster_id:
    :param intercluster_only:
    :return:
    :rtype: pq.bit
    """
    current_cluster = self._find_cluster(cluster_id)
    cluster_index = self.sim.clusters.index(current_cluster)
    cluster_tree, preds = sp.breadth_first_order(self.cluster_graph, cluster_index,
                                                 directed=False,
                                                 return_predecessors=True)

    children_indexes = list()
    for index in cluster_tree:
        if preds[index] == cluster_index:
            children_indexes.append(index)

    cluster_graph = self.cluster_graph.toarray()
    cluster_graph[cluster_index] = 0
    cluster_graph[:, cluster_index] = 0
    components_count, labels = sp.connected_components(cluster_graph, directed=False)

    cluster_groups = collections.defaultdict(list)
    for index, label in enumerate(labels):
        if index == cluster_index:
            continue
        cluster_groups[label].append(self.sim.clusters[index])

    # Build a set of "super clusters." These are just collections of all
    # segments from the clusters that make up each child branch from the
    # current cluster.
    super_clusters = collections.defaultdict(list)
    for label, clusters in cluster_groups.items():
        for cluster in clusters:
            super_clusters[label].extend(cluster.tour.objects)

    # Now, we get the list of segment pairs that will communicate through
    # the current cluster. This does not include the segments within the
    # current cluster, as those will be accounted for separately.
    sc_index_pairs = itertools.permutations(super_clusters.keys(), 2)
    segment_pairs = list()
    for src_sc_index, dst_sc_index in sc_index_pairs:
        src_segments = super_clusters[src_sc_index]
        dst_segments = super_clusters[dst_sc_index]
        segment_pairs.extend(list(itertools.product(src_segments, dst_segments)))

    intercluster_volume = np.sum([segment_volume(src, dst, self.env)
                                  for src, dst in segment_pairs])

    if not intercluster_only:
        # NOW we calculate the intra-cluster volume
        segment_pairs = itertools.permutations(current_cluster.tour.objects, 2)
        intracluster_volume = np.sum([segment_volume(src, dst, self.env)
                                      for src, dst in segment_pairs])
    else:
        intracluster_volume = 0

    # ... and the outgoing data volume from this cluster
    other_segments = list(set(self.sim.segments) - set(current_cluster.tour.objects))
    segment_pairs = itertools.product(current_cluster.tour.objects, other_segments)
    intercluster_volume += np.sum([segment_volume(s, d, self.env)
                                   for s, d in segment_pairs])

    return intercluster_volume + intracluster_volume
def _run_bfswpf(ppci, options, **kwargs):
    """
    SPARSE version of distribution power flow solution according to [1]

    :References:
    [1] Jen-Hao Teng, "A Direct Approach for Distribution System Load Flow Solutions",
    IEEE Transactions on Power Delivery, vol. 18, no. 3, pp. 882-887, July 2003.

    :param ppci: matpower-style case data
    :param options: pf options
    :return: results (pypower style), success (flag about PF convergence)
    """
    time_start = time()  # starting pf calculation timing

    baseMVA, bus, gen, branch, ref, pv, pq, \
        on, gbus, V0 = _get_pf_variables_from_ppci(ppci)

    enforce_q_lims, tolerance_kva, max_iteration, calculate_voltage_angles, numba = \
        _get_options(options)

    numba, makeYbus = _import_numba_extensions_if_flag_is_true(numba)

    nobus = bus.shape[0]
    nobranch = branch.shape[0]

    # generate Sbus
    Sbus = makeSbus(baseMVA, bus, gen)
    # generate results for original bus ordering
    # Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)
    ppci, Ybus, Yf, Yt = _get_Y_bus(ppci, options, makeYbus, baseMVA, bus, branch)

    # creating network graph from list of branches
    bus_from = branch[:, F_BUS].real.astype(int)
    bus_to = branch[:, T_BUS].real.astype(int)
    G = csr_matrix((np.ones(nobranch), (bus_from, bus_to)),
                   shape=(nobus, nobus))
    # create spanning trees using breadth-first-search
    # TODO add efficiency warning if a network is heavy-meshed
    G_trees = []
    for refbus in ref:
        G_trees.append(csgraph.breadth_first_tree(G, refbus, directed=False))

    # depth-first-search bus ordering and generating Direct Load Flow matrix DLF = BCBV * BIBC
    ppci, DLF, buses_ordered_bfs_nets = _get_bibc_bcbv(ppci, options, bus, branch, G)

    # if there are trafos with phase-shift calculate Ybus without phase-shift for bfswpf
    any_trafo_shift = (branch[:, SHIFT] != 0).any()
    if any_trafo_shift:
        branch_noshift = branch.copy()
        branch_noshift[:, SHIFT] = 0
        Ybus_noshift, Yf_noshift, _ = makeYbus(baseMVA, bus, branch_noshift)
    else:
        Ybus_noshift = Ybus.copy()

    # get current injections for constant-current loads
    Ibus = _get_ibus(ppci)

    # #-----  run the power flow  -----
    V_final, success = _bfswpf(DLF, bus, gen, branch, baseMVA, Ybus_noshift,
                               Sbus, Ibus, V0, ref, pv, pq, buses_ordered_bfs_nets,
                               enforce_q_lims, tolerance_kva, max_iteration, **kwargs)

    # if phase-shifting trafos are present adjust final state vector angles accordingly
    if calculate_voltage_angles and any_trafo_shift:
        brch_shift_mask = branch[:, SHIFT] != 0
        trafos_shift = dict(list(zip(
            list(zip(branch[brch_shift_mask, F_BUS].real.astype(int),
                     branch[brch_shift_mask, T_BUS].real.astype(int))),
            branch[brch_shift_mask, SHIFT].real)))
        for trafo_ind, shift_degree in iteritems(trafos_shift):
            neti = 0
            # if multiple reference nodes, find in which network trafo is located
            if len(ref) > 0:
                for refbusi in range(len(ref)):
                    if trafo_ind[0] in buses_ordered_bfs_nets[refbusi]:
                        neti = refbusi
                        break
            G_tree = G_trees[neti]
            buses_ordered_bfs = buses_ordered_bfs_nets[neti]
            if (np.argwhere(buses_ordered_bfs == trafo_ind[0]) <
                    np.argwhere(buses_ordered_bfs == trafo_ind[1])):
                lv_bus = trafo_ind[1]
                shift_degree *= -1
            else:
                lv_bus = trafo_ind[0]

            buses_shifted_from_root = csgraph.breadth_first_order(
                G_tree, lv_bus, directed=True, return_predecessors=False)
            V_final[buses_shifted_from_root] *= np.exp(1j * np.pi / 180 * shift_degree)

    # #----- output results to ppc ------
    ppci["et"] = time() - time_start  # pf time end

    bus, gen, branch = pfsoln(baseMVA, bus, gen, branch, Ybus, Yf, Yt, V_final, ref)
    # bus, gen, branch = pfsoln_bfsw(baseMVA, bus, gen, branch, V_final, ref, pv, pq, BIBC, ysh_f,ysh_t,Iinj, Sbus)

    ppci["success"] = success

    ppci["bus"], ppci["gen"], ppci["branch"] = bus, gen, branch

    return ppci, success
def _make_bibc_bcbv(bus, branch, graph):
    """
    performs depth-first-search bus ordering and creates Direct Load Flow (DLF) matrix
    which establishes direct relation between bus current injections and voltage drops from
    each bus to the root bus

    :param ppc: matpower-type case data
    :return: DLF matrix DLF = BIBC * BCBV where
                BIBC - Bus Injection to Branch-Current
                BCBV - Branch-Current to Bus-Voltage
            ppc with bfs ordering
            original bus names bfs ordered (used to convert voltage array back to normal)
    """

    nobus = bus.shape[0]
    nobranch = branch.shape[0]

    # reference bus is assumed as root bus for a radial network
    refs = bus[bus[:, BUS_TYPE] == 3, BUS_I]
    norefs = len(refs)

    G = graph.copy()  # network graph

    # dictionary with impedance values keyed by branch tuple (frombus, tobus)
    # TODO use list or array, not both
    branches_lst = list(zip(branch[:, F_BUS].real.astype(int),
                            branch[:, T_BUS].real.astype(int)))
    branches_arr = branch[:, F_BUS:T_BUS + 1].real.astype(int)
    branches_ind_dict = dict(zip(zip(branches_arr[:, 0], branches_arr[:, 1]),
                                 range(0, nobranch)))
    branches_ind_dict.update(dict(zip(zip(branches_arr[:, 1], branches_arr[:, 0]),
                                      range(0, nobranch))))

    tap = branch[:, TAP]  # * np.exp(1j * np.pi / 180 * branch[:, SHIFT])
    z_ser = (branch[:, BR_R].real + 1j * branch[:, BR_X].real) * tap  # series impedance
    z_brch_dict = dict(zip(branches_lst, z_ser))

    # initialization of lists for building sparse BIBC and BCBV matrices
    rowi_BIBC = []
    coli_BIBC = []
    data_BIBC = []
    data_BCBV = []

    buses_ordered_bfs_nets = []
    for ref in refs:
        # ordering buses according to breadth-first-search (bfs)
        buses_ordered_bfs, predecs_bfs = csgraph.breadth_first_order(
            G, ref, directed=False, return_predecessors=True)
        buses_ordered_bfs_nets.append(buses_ordered_bfs)
        branches_ordered_bfs = list(zip(predecs_bfs[buses_ordered_bfs[1:]],
                                        buses_ordered_bfs[1:]))
        G_tree = csgraph.breadth_first_tree(G, ref, directed=False)

        # if multiple networks get subnetwork branches
        if norefs > 1:
            branches_sub_mask = (np.in1d(branches_arr[:, 0], buses_ordered_bfs) &
                                 np.in1d(branches_arr[:, 1], buses_ordered_bfs))
            branches = np.sort(branches_arr[branches_sub_mask, :], axis=1)
        else:
            branches = np.sort(branches_arr, axis=1)

        # identify loops if graph is not a tree
        branches_loops = []
        if G_tree.nnz < branches.shape[0]:
            G_tree_nnzs = G_tree.nonzero()
            branches_tree = np.sort(np.array([G_tree_nnzs[0], G_tree_nnzs[1]]).T, axis=1)
            branches_loops = (set(zip(branches[:, 0], branches[:, 1])) -
                              set(zip(branches_tree[:, 0], branches_tree[:, 1])))

        # #------ building BIBC and BCBV martrices ------
        # branches in trees
        brchi = 0
        for brch in branches_ordered_bfs:
            tree_down, predecs = csgraph.breadth_first_order(
                G_tree, brch[1], directed=True, return_predecessors=True)
            if len(tree_down) == 1:  # If at leaf
                pass
            if brch in z_brch_dict:
                z_br = z_brch_dict[brch]
            else:
                z_br = z_brch_dict[brch[::-1]]
            rowi_BIBC += [branches_ind_dict[brch]] * len(tree_down)
            coli_BIBC += list(tree_down)
            data_BCBV += [z_br] * len(tree_down)
            data_BIBC += [1] * len(tree_down)

        # branches from loops
        for loop_i, brch_loop in enumerate(branches_loops):
            path_lens, path_preds = csgraph.shortest_path(
                G_tree, directed=False, indices=brch_loop, return_predecessors=True)
            init, end = brch_loop
            loop = [end]
            while init != end:
                end = path_preds[0, end]
                loop.append(end)

            loop_size = len(loop)
            coli_BIBC += [nobus + loop_i] * loop_size
            for i in range(len(loop)):
                brch = (loop[i - 1], loop[i])
                if np.argwhere(buses_ordered_bfs == brch[0]) < \
                        np.argwhere(buses_ordered_bfs == brch[1]):
                    brch_direct = 1
                else:
                    brch_direct = -1
                data_BIBC.append(brch_direct)

                if brch in branches_ind_dict:
                    rowi_BIBC.append(branches_ind_dict[brch])
                else:
                    rowi_BIBC.append(branches_ind_dict[brch[::-1]])

                if brch in z_brch_dict:
                    data_BCBV.append(z_brch_dict[brch] * brch_direct)
                else:
                    data_BCBV.append(z_brch_dict[brch[::-1]] * brch_direct)

                brchi += 1

    # construction of the BIBC matrix
    # column indices correspond to buses: assuming root bus is always 0 after ordering,
    # indices are subtracted by 1
    BIBC = csr_matrix((data_BIBC, (rowi_BIBC, np.array(coli_BIBC) - norefs)),
                      shape=(nobranch, nobranch))
    BCBV = csr_matrix((data_BCBV, (rowi_BIBC, np.array(coli_BIBC) - norefs)),
                      shape=(nobranch, nobranch)).transpose()

    if BCBV.shape[0] > nobus - 1:  # if nbrch > nobus - 1 -> network has loops
        DLF_loop = BCBV * BIBC
        # DLF = [A  M.T ]
        #       [M  N   ]
        A = DLF_loop[0:nobus - 1, 0:nobus - 1]
        M = DLF_loop[nobus - 1:, 0:nobus - 1]
        N = DLF_loop[nobus - 1:, nobus - 1:].A
        # considering the fact that number of loops is relatively small, N matrix is
        # expected to be small and dense
        # ...in that case dense version is more efficient, i.e. N is transformed to dense and
        # inverted using sp.linalg.inv(N)
        DLF = A - M.T * csr_matrix(sp.linalg.inv(N)) * M  # Kron's Reduction
    else:  # no loops -> radial network
        DLF = BCBV * BIBC

    return DLF, buses_ordered_bfs_nets
def amfs1level(
    W: lil_matrix,
    Sigma: lil_matrix = None,
    delta=0.1,
    thresh_kld=1e-6,
    priority=True,
    verbose=True,
):
    if Sigma is None:
        Sigma = compute_sigma(W, delta)
    N = W.shape[-1]
    not_arrived = np.arange(N)
    nodes = breadth_first_order(W, i_start=0, return_predecessors=False)
    not_arrived = np.setdiff1d(not_arrived, nodes)
    s1 = [0]
    s2 = []
    nodes = nodes[1:]
    while len(not_arrived) > 0:
        new_root = not_arrived[0]
        other_nodes = breadth_first_order(W, i_start=new_root, return_predecessors=False)
        not_arrived = np.setdiff1d(not_arrived, other_nodes)
        s1.append(new_root)
        nodes = np.append(nodes, other_nodes[1:])

    balance_flag = True
    for i, v in enumerate(nodes):
        if verbose:
            print("handling {:5d}-th node: {:5d}, ".format(i, v), end="")
        N1 = len(s1)
        s = [*s1, v, *s2]
        W_local = W[np.ix_(s, s)]
        Wb1 = W_local.copy()
        Wb2 = W_local.copy()
        Wb2[:N1, :N1] = 0
        Wb2[N1:, N1:] = 0
        Wb1[:N1 + 1, :N1 + 1] = 0
        Wb1[N1 + 1:, N1 + 1:] = 0

        if priority:  # KLD holds priority
            S_local = Sigma[np.ix_(s, s)]
            DK1 = dkl(Wb1, S_local, delta)
            DK2 = dkl(Wb2, S_local, delta)
            diff = DK1 - DK2
            if verbose:
                print("DK1-DK2: {:5f}".format(diff))
            if abs(diff) > thresh_kld:
                if diff > 0:
                    s2.append(v)
                else:
                    s1.append(v)
            else:
                rank1 = structural_rank(Wb1.tocsr())
                rank2 = structural_rank(Wb2.tocsr())
                if rank1 > rank2:
                    s1.append(v)
                elif rank1 < rank2:
                    s2.append(v)
                else:
                    if balance_flag:
                        s1.append(v)
                    else:
                        s2.append(v)
                    balance_flag = not balance_flag
        else:
            rank1 = structural_rank(Wb1)
            rank2 = structural_rank(Wb2)
            if rank1 > rank2:
                s1.append(v)
            elif rank1 < rank2:
                s2.append(v)
            else:
                S_local = Sigma[np.ix_(s, s)]
                DK1 = dkl(Wb1, S_local, delta)
                DK2 = dkl(Wb2, S_local, delta)
                if DK1 < DK2:
                    s1.append(v)
                elif DK1 > DK2:
                    s2.append(v)
                else:
                    if balance_flag:
                        s1.append(v)
                    else:
                        s2.append(v)
                    balance_flag = not balance_flag
    return s1, s2
# + [markdown] pycharm={"name": "#%% md\n"}
# It looks like `scipy.sparse.csgraph` has some of the reordering algorithms mentioned in the lecture notes.
# At least:
#
# ### 1. BFS levelset
# _This could also be 2. BFS queue, depending on the actual implementation?_
#
# Note that `i_start` is the index of the adjacency matrix.
# Thus, even though we have indexed our nodes starting at 1, in the adjacency matrix the indices start from 0 (as they always do in Python).

# + pycharm={"name": "#%%\n"}
import scipy.sparse.csgraph as csgraph

csgraph.breadth_first_order(gm.adjacency_matrix(G), i_start=0, return_predecessors=False)

# + [markdown] pycharm={"name": "#%% md\n"}
# ### 4. Reverse Cuthill McKee

# + pycharm={"name": "#%%\n"}
csgraph.reverse_cuthill_mckee(gm.adjacency_matrix(G))

# + [markdown] pycharm={"name": "#%% md\n"}
# # Grids
#
# ## 450. Triangular grid

# + pycharm={"name": "#%%\n"}
from scipy.spatial import Delaunay
def _best_subset(self, num_qubits):
    """Computes the qubit mapping with the best connectivity.

    Args:
        num_qubits (int): Number of subset qubits to consider.

    Returns:
        ndarray: Array of qubits to use for best connectivity mapping.
    """
    if num_qubits == 1:
        return np.array([0])
    if num_qubits == 0:
        return []

    device_qubits = self.coupling_map.size()

    cmap = np.asarray(self.coupling_map.get_edges())
    data = np.ones_like(cmap[:, 0])
    sp_cmap = sp.coo_matrix(
        (data, (cmap[:, 0], cmap[:, 1])), shape=(device_qubits, device_qubits)
    ).tocsr()
    best = 0
    best_map = None
    best_error = np.inf
    best_sub = None
    # do bfs with each node as starting point
    for k in range(sp_cmap.shape[0]):
        bfs = cs.breadth_first_order(
            sp_cmap, i_start=k, directed=False, return_predecessors=False
        )

        connection_count = 0
        sub_graph = []
        for i in range(num_qubits):
            node_idx = bfs[i]
            for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[node_idx + 1]):
                node = sp_cmap.indices[j]
                for counter in range(num_qubits):
                    if node == bfs[counter]:
                        connection_count += 1
                        sub_graph.append([node_idx, node])
                        break

        if self.backend_prop:
            curr_error = 0
            # compute meas error for subset
            avg_meas_err = np.mean(self.meas_arr)
            meas_diff = np.mean(self.meas_arr[bfs[0:num_qubits]]) - avg_meas_err
            if meas_diff > 0:
                curr_error += self.num_meas * meas_diff

            cx_err = np.mean([self.cx_mat[edge[0], edge[1]] for edge in sub_graph])
            if self.coupling_map.is_symmetric:
                cx_err /= 2
            curr_error += self.num_cx * cx_err
            if connection_count >= best and curr_error < best_error:
                best = connection_count
                best_error = curr_error
                best_map = bfs[0:num_qubits]
                best_sub = sub_graph
        else:
            if connection_count > best:
                best = connection_count
                best_map = bfs[0:num_qubits]
                best_sub = sub_graph

    # Return a best mapping that has reduced bandwidth
    mapping = {}
    for edge in range(best_map.shape[0]):
        mapping[best_map[edge]] = edge
    new_cmap = [[mapping[c[0]], mapping[c[1]]] for c in best_sub]
    rows = [edge[0] for edge in new_cmap]
    cols = [edge[1] for edge in new_cmap]
    data = [1] * len(rows)
    sp_sub_graph = sp.coo_matrix((data, (rows, cols)),
                                 shape=(num_qubits, num_qubits)).tocsr()
    perm = cs.reverse_cuthill_mckee(sp_sub_graph)
    best_map = best_map[perm]
    return best_map