def test_s_t_blossom(self):
    """Augmenting paths through S- and T-blossoms on a 6-node graph.

    The same graph is re-weighted twice; each variant must still yield
    the expected optimum matching.
    """
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(
        [(1, 2, 9), (1, 3, 8), (2, 3, 10), (1, 4, 5), (4, 5, 4), (1, 6, 3)]
    )

    # Local helper so each assertion re-runs the matcher on the
    # current state of the (mutated) graph.
    def solve():
        return retworkx.max_weight_matching(
            graph, weight_fn=lambda x: x, verify_optimum=True
        )

    self.compare_match_sets(solve(), {(1, 6), (2, 3), (4, 5)})

    # Re-weight the two pendant edges.
    graph.remove_edge(1, 6)
    graph.remove_edge(4, 5)
    graph.extend_from_weighted_edge_list([(4, 5, 3), (1, 6, 4)])
    self.compare_match_sets(solve(), {(1, 6), (2, 3), (4, 5)})

    # Move the pendant edge from node 1 to node 3.
    graph.remove_edge(1, 6)
    graph.add_edge(3, 6, 4)
    self.compare_match_sets(solve(), {(1, 2), (3, 6), (4, 5)})
def test_path_graph(self):
    """On a weighted 4-node path the plain optimum keeps one heavy edge,
    while the max-cardinality optimum must take both outer edges.
    """
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list([(1, 2, 5), (2, 3, 11), (3, 4, 5)])

    plain = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(plain, {(2, 3)})

    max_cardinality = retworkx.max_weight_matching(
        graph, True, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(max_cardinality, {(1, 2), (3, 4)})
def test_nested_blossom_expand_recursively(self):
    """A nested blossom must be expanded recursively to reach the optimum."""
    edge_list = [
        (1, 2, 40), (1, 3, 40), (2, 3, 60), (2, 4, 55), (3, 5, 55),
        (4, 5, 50), (1, 8, 15), (5, 7, 30), (7, 6, 10), (8, 10, 10),
        (4, 9, 30),
    ]
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(edge_list)

    pairing = {1: 2, 2: 1, 3: 5, 4: 9, 5: 3, 6: 7, 7: 6, 8: 10, 9: 4, 10: 8}
    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, match_dict_to_set(pairing))
def test_nested_s_blossom_relabel_expand(self):
    """A nested S-blossom is relabeled and later expanded during the search."""
    edge_list = [
        (1, 2, 19), (1, 3, 20), (1, 8, 8), (2, 3, 25), (2, 4, 18),
        (3, 5, 18), (4, 5, 13), (4, 7, 7), (5, 6, 7),
    ]
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(edge_list)

    pairing = {1: 8, 2: 3, 3: 2, 4: 7, 5: 6, 6: 5, 7: 4, 8: 1}
    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, match_dict_to_set(pairing))
def test_blossom_relabel_multiple_paths_least_slack(self):
    """When a blossom is relabeled, the least-slack of several paths wins."""
    edge_list = [
        (1, 2, 45), (1, 5, 45), (2, 3, 50), (3, 4, 45), (4, 5, 50),
        (1, 6, 30), (3, 9, 35), (4, 8, 28), (5, 7, 26), (9, 10, 5),
    ]
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(edge_list)

    pairing = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9}
    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, match_dict_to_set(pairing))
def test_gnm_random_against_networkx(self):
    """One fixed-seed G(n, m) random graph, cross-checked against networkx."""
    rx_graph = retworkx.undirected_gnm_random_graph(10, 13, seed=42)
    nx_graph = networkx.Graph(list(rx_graph.edge_list()))

    expected = networkx.max_weight_matching(nx_graph)
    found = retworkx.max_weight_matching(rx_graph, verify_optimum=True)
    self.compare_rx_nx_sets(rx_graph, found, expected, 42, nx_graph)
def test_single_edge_no_verification(self):
    """A single edge is matched even with optimum verification disabled."""
    graph = retworkx.PyGraph()
    graph.add_nodes_from([0, 1])
    graph.add_edges_from([(0, 1, 1)])

    result = retworkx.max_weight_matching(graph, verify_optimum=False)
    self.compare_match_sets(result, {(0, 1)})
def test_gnp_random_against_networkx_max_cardinality(self):
    """Max-cardinality matching on a fixed-seed G(n, p) graph vs networkx."""
    rx_graph = retworkx.undirected_gnp_random_graph(10, 0.78, seed=428)
    nx_graph = networkx.Graph(list(rx_graph.edge_list()))

    expected = networkx.max_weight_matching(nx_graph, maxcardinality=True)
    found = retworkx.max_weight_matching(
        rx_graph, max_cardinality=True, verify_optimum=True
    )
    self.compare_rx_nx_sets(rx_graph, found, expected, 428, nx_graph)
def test_s_blossom(self):
    """Create an S-blossom and use it for augmentation, twice."""
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(
        [(0, 1, 8), (0, 2, 9), (1, 2, 10), (2, 3, 7)]
    )

    def solve():
        return retworkx.max_weight_matching(
            graph, weight_fn=lambda x: x, verify_optimum=True
        )

    self.compare_match_sets(solve(), {(0, 1), (2, 3)})

    # Two extra pendant edges force the blossom to be used differently.
    graph.extend_from_weighted_edge_list([(0, 5, 5), (3, 4, 6)])
    self.compare_match_sets(solve(), {(0, 5), (1, 2), (3, 4)})
def test_small_graph(self):
    """A two-edge path: only the heavier edge belongs to the optimum."""
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list([(1, 2, 10), (2, 3, 11)])

    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, {(2, 3)})
def test_negative_weights(self):
    """Negative weights: the plain optimum avoids them, the
    max-cardinality optimum is forced to include some.
    """
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(
        [(1, 2, 2), (1, 3, -2), (2, 3, 1), (2, 4, -1), (3, 4, -6)]
    )

    plain = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(plain, {(1, 2)})

    max_cardinality = retworkx.max_weight_matching(
        graph, True, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(max_cardinality, {(1, 3), (2, 4)})
def test_gnp_random_against_networkx(self):
    """Cross-check matchings on 1024 fixed-seed G(n, p) graphs vs networkx."""
    # NOTE(review): a later method in this source appears to carry this exact
    # name; if both live in the same class, the later definition silently
    # shadows this one and this version never runs — confirm and keep only one.
    for i in range(1024):
        with self.subTest(i=i):
            # Deterministic seed per iteration so failures are reproducible.
            rx_graph = retworkx.undirected_gnp_random_graph(10, 0.75,
                                                            seed=42 + i)
            nx_graph = networkx.Graph(list(rx_graph.edge_list()))
            nx_matches = networkx.max_weight_matching(nx_graph)
            rx_matches = retworkx.max_weight_matching(rx_graph,
                                                      verify_optimum=True)
            self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches,
                                    42 + i, nx_graph)
def test_gnp_random_against_networkx(self):
    """Cross-check matchings on 1024 fixed-seed G(n, p) graphs vs networkx."""
    # TODO: add back subTest usage on new testtools release
    for offset in range(1024):
        seed = 42 + offset
        rx_graph = retworkx.undirected_gnp_random_graph(10, 0.75, seed=seed)
        nx_graph = networkx.Graph(list(rx_graph.edge_list()))

        expected = networkx.max_weight_matching(nx_graph)
        found = retworkx.max_weight_matching(rx_graph, verify_optimum=True)
        self.compare_rx_nx_sets(rx_graph, found, expected, seed, nx_graph)
def nearest_cluster(self, cluster, graph, target):
    """Find the nearest cluster to the target cluster.

    Args:
        cluster (dict): Maps a cluster label to the list of error-graph
            nodes it contains.
        graph (retworkx.PyGraph): Error graph in which the nearest
            cluster and node will be searched.
        target: Label of the cluster for which the nearest neighbouring
            cluster is being searched.

    Returns:
        list: ``[nearest_outside_node, nearest_cluster]`` where
            ``nearest_outside_node`` is the closest node that does not
            belong to the target cluster and ``nearest_cluster`` is the
            label of the cluster that node belongs to.
    """
    # Rebuild the full error graph, then prune it down to the target
    # cluster's nodes only.
    cluster_graph = rx.PyGraph()
    cluster_graph.add_nodes_from(graph.nodes())
    cluster_graph.add_edges_from(graph.weighted_edge_list())
    # NOTE(review): removal by enumeration index assumes graph node
    # indexes are contiguous 0..n-1 in nodes() order — confirm for the
    # graphs produced by make_error_graph.
    for i, __ in enumerate(graph.nodes()):
        if __ not in cluster[target]:
            cluster_graph.remove_node(i)
    # Match within the cluster; since the target cluster has odd size,
    # exactly one node is expected to be left unmatched.
    edges = rx.max_weight_matching(cluster_graph,
                                   max_cardinality=True,
                                   weight_fn=lambda x: x)
    remaining_node = list(cluster_graph.node_indexes())
    for edge in edges:
        remaining_node.remove(edge[0])
        remaining_node.remove(edge[1])
    # Collect the unmatched node's neighbours (in the full graph) with
    # their edge weights. Only remaining_node[0] is used — assumes a
    # single leftover node.
    node_neigbours = {}
    for edge in graph.weighted_edge_list():
        if remaining_node[0] == edge[0]:
            node_neigbours[graph[edge[1]]] = {'weight': edge[2]}
        if remaining_node[0] == edge[1]:
            node_neigbours[graph[edge[0]]] = {'weight': edge[2]}
    # Keep the highest-weight neighbours. NOTE(review): reverse=True
    # selects the *largest* weights — "nearest" here presumably means
    # highest edge weight (e.g. log-likelihood), not smallest distance;
    # confirm against how the error-graph weights are defined.
    nearest_neighbours = sorted(
        node_neigbours.items(),
        key=lambda e: e[1]["weight"],
        reverse=True)[:len(cluster[target])]
    nearest_outside_node = [
        x[0] for x in nearest_neighbours if x[0] not in cluster[target]
    ]
    # Locate which cluster the chosen outside node belongs to.
    # NOTE(review): nearest_cluster is unbound (NameError at return) if
    # the node is in no cluster, and nearest_outside_node[0] raises
    # IndexError if every candidate was inside the target cluster.
    for x in cluster.keys():
        if nearest_outside_node[0] in cluster[x]:
            nearest_cluster = x
    return [nearest_outside_node[0], nearest_cluster]
def test_nested_s_blossom(self):
    """Create a nested S-blossom and use it for augmentation."""
    edge_list = [
        (1, 2, 9), (1, 3, 9), (2, 3, 10), (2, 4, 8), (3, 5, 8),
        (4, 5, 10), (5, 6, 6),
    ]
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(edge_list)

    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, {(1, 3), (2, 4), (5, 6)})
def test_gnp_random_against_networkx_with_negative_weight(self):
    """Random G(n, p) graphs with random weights in [-5000, 5000] vs networkx."""
    for i in range(1024):
        with self.subTest(i=i):
            random.seed(i)
            rx_graph = retworkx.undirected_gnp_random_graph(
                10, 0.75, seed=42 + i
            )
            for edge in rx_graph.edge_list():
                rx_graph.update_edge(*edge, random.randint(-5000, 5000))

            weighted_edges = [
                (u, v, {"weight": w})
                for u, v, w in rx_graph.weighted_edge_list()
            ]
            nx_graph = networkx.Graph(weighted_edges)

            expected = networkx.max_weight_matching(nx_graph)
            found = retworkx.max_weight_matching(
                rx_graph, weight_fn=lambda x: x, verify_optimum=True
            )
            self.compare_rx_nx_sets(
                rx_graph, found, expected, 42 + i, nx_graph
            )
def test_s_blossom_relabel_expand(self):
    """An S-blossom is relabeled and then expanded during the search."""
    edge_list = [
        (1, 2, 23), (1, 5, 22), (1, 6, 15), (2, 3, 25),
        (3, 4, 22), (4, 5, 25), (4, 8, 14), (5, 7, 13),
    ]
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(edge_list)

    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, {(1, 6), (2, 3), (4, 8), (5, 7)})
def test_gnp_random_against_networkx_with_weight(self):
    """Random G(n, p) graphs with random weights in [0, 5000] vs networkx."""
    # TODO: add back subTest usage on new testtools release
    for i in range(1024):
        random.seed(i)
        rx_graph = retworkx.undirected_gnp_random_graph(10, 0.75, seed=42 + i)
        for edge in rx_graph.edge_list():
            rx_graph.update_edge(*edge, random.randint(0, 5000))

        weighted_edges = [
            (u, v, {'weight': w}) for u, v, w in rx_graph.weighted_edge_list()
        ]
        nx_graph = networkx.Graph(weighted_edges)

        expected = networkx.max_weight_matching(nx_graph)
        found = retworkx.max_weight_matching(
            rx_graph, weight_fn=lambda x: x, verify_optimum=True
        )
        self.compare_rx_nx_sets(rx_graph, found, expected, 42 + i, nx_graph)
def test_nested_s_blossom_relabel(self):
    """Create an S-blossom, relabel it as a T-blossom, and use it."""
    edge_list = [
        (1, 2, 10), (1, 7, 10), (2, 3, 12), (3, 4, 20), (3, 5, 20),
        (4, 5, 25), (5, 6, 10), (6, 7, 10), (7, 8, 8),
    ]
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(edge_list)

    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, {(1, 2), (3, 4), (5, 6), (7, 8)})
def test_nested_blossom_augmented(self):
    """Augment through a nested blossom on a 12-node graph."""
    edge_list = [
        (1, 2, 45), (1, 7, 45), (2, 3, 50), (3, 4, 45), (4, 5, 95),
        (4, 6, 94), (5, 6, 94), (6, 7, 50), (1, 8, 30), (3, 11, 35),
        (5, 9, 36), (7, 10, 26), (11, 12, 5),
    ]
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list(edge_list)

    # Expected pairing, written as unordered pairs and expanded to the
    # symmetric node -> mate mapping.
    pairs = [(1, 8), (2, 3), (4, 6), (5, 9), (7, 10), (11, 12)]
    expected = {}
    for a, b in pairs:
        expected[a] = b
        expected[b] = a

    result = retworkx.max_weight_matching(
        graph, weight_fn=lambda x: x, verify_optimum=True
    )
    self.compare_match_sets(result, match_dict_to_set(expected))
def matching(self, string):
    """
    Args:
        string (str): A string describing the output from the code.

    Returns:
        str: A string with corrected logical values, computed using
        minimum weight perfect matching.

    Additional information:
        This function can be run directly, or used indirectly to
        calculate a logical error probability with `get_logical_prob`.
    """
    # this matching algorithm is designed for a single graph
    E = self.make_error_graph(string)['0']
    # set up graph that is like E, but each syndrome node is connected to a
    # separate copy of the nearest logical node
    E_matching = rx.PyGraph(multigraph=False)
    syndrome_nodes = []
    logical_nodes = []
    logical_neighbours = []
    # Maps a node payload (tuple) to its index in E_matching.
    node_map = {}
    # Partition E's nodes: payloads with first element 0 are logical
    # nodes, everything else is a syndrome node.
    for node in E.nodes():
        node_map[node] = E_matching.add_node(node)
        if node[0] == 0:
            logical_nodes.append(node)
        else:
            syndrome_nodes.append(node)
    for source in syndrome_nodes:
        # Copy every syndrome-syndrome edge weight over from E.
        # NOTE(review): node_map values are E_matching indexes but are
        # used here to query E — this is only correct if both graphs
        # assign identical indexes in insertion order; confirm.
        for target in syndrome_nodes:
            if target != (source):
                E_matching.add_edge(
                    node_map[source], node_map[target],
                    E.get_edge_data(node_map[source], node_map[target]))
        # Pick the logical node with the maximum edge weight to this
        # syndrome node (weights are presumably log-likelihoods, so
        # larger means "nearer" — TODO confirm).
        potential_logical = {}
        for target in logical_nodes:
            potential_logical[target] = E.get_edge_data(
                node_map[source], node_map[target])
        nearest_logical = max(potential_logical, key=potential_logical.get)
        # A per-syndrome copy of the nearest logical node, keyed by
        # concatenating the two node tuples.
        nl_target = nearest_logical + source
        if nl_target not in node_map:
            node_map[nl_target] = E_matching.add_node(nl_target)
        E_matching.add_edge(node_map[source], node_map[nl_target],
                            potential_logical[nearest_logical])
        logical_neighbours.append(nl_target)
    # Zero-weight edges between all logical copies let any number of
    # them be matched off against each other.
    for source in logical_neighbours:
        for target in logical_neighbours:
            if target != (source):
                E_matching.add_edge(node_map[source], node_map[target], 0)
    # do the matching on this
    matches = {(E_matching[x[0]], E_matching[x[1]])
               for x in rx.max_weight_matching(
                   E_matching, max_cardinality=True, weight_fn=lambda x: x)
               }
    # use it to construct and return a corrected logical string
    logicals = self._separate_string(string)[0]
    for (source, target) in matches:
        # A syndrome node matched to a logical(-copy) node flips the
        # corresponding logical bit (payload element 0 == 0 marks the
        # logical side; element 1 is the logical bit's position).
        if source[0] == 0 and target[0] != 0:
            logicals[source[1]] = str((int(logicals[source[1]]) + 1) % 2)
        if target[0] == 0 and source[0] != 0:
            logicals[target[1]] = str((int(logicals[target[1]]) + 1) % 2)
    # Re-join the logical bits into a space-separated string.
    logical_string = ''
    for logical in logicals:
        logical_string += logical + ' '
    logical_string = logical_string[:-1]
    return logical_string
def test_empty_graph(self):
    """Matching an empty graph yields an empty set."""
    empty = retworkx.PyGraph()
    result = retworkx.max_weight_matching(empty)
    self.assertEqual(result, set())
def cluster_decoding(self, string, eps=4):
    """Graph theoretical decoder that uses clustering and matching to
    decode errors.

    Args:
        string (str): A string describing the output from the code.
        eps (int): The maximum distance between two samples for one to
            be considered as in the neighborhood of the other. This is
            not a maximum bound on the distances of points within a
            cluster. This is the most important DBSCAN parameter to
            choose appropriately for your data set and distance
            function. Default value here is 4.

    Returns:
        str: A string with corrected logical values, computed using
        clustering and matching.

    Raises:
        QiskitError: if scikit-learn is not installed.

    Additional information:
        This function can be run directly, or used indirectly to
        calculate a logical error probability with `get_logical_prob`.
    """
    if not HAS_SCIKIT:
        raise QiskitError('please install scikit-learn')
    graph = self.make_error_graph(string)['0']
    # The two fixed logical nodes; everything else is a syndrome node.
    logical_nodes = [(0, 0, 0), (0, 1, 0)]
    Non_neutral_nodes = list(graph.nodes())
    for _ in logical_nodes:
        Non_neutral_nodes.remove(_)
    # Trivial Case: no syndrome nodes means nothing to correct — return
    # the logical bits unchanged.
    if len(Non_neutral_nodes) == 0:
        logicals = self._separate_string(string)[0]
        logical_string = ''
        for logical in logicals:
            logical_string += logical + ' '
        logical_string = logical_string[:-1]
        return logical_string
    # Cluster Decoder
    corrected_logical_string = []
    # Cluster the syndrome nodes by Manhattan distance on their tuples.
    # NOTE(review): assumes node tuples are meaningful coordinates for
    # DBSCAN — confirm against make_error_graph's node format.
    clustering = DBSCAN(eps=eps, min_samples=2,
                        metric='manhattan').fit(Non_neutral_nodes)
    # Group nodes by DBSCAN label (label -1 = noise becomes its own key).
    cluster = {_: [] for _ in set(clustering.labels_)}
    for _, __ in zip(clustering.labels_, Non_neutral_nodes):
        cluster[_].append(__)
    # appending logical nodes as separate clusters
    cluster['logical_0'] = [logical_nodes[0]]
    cluster['logical_1'] = [logical_nodes[1]]
    # Repeatedly fix up odd-sized clusters: either absorb the nearest
    # logical node, or merge with the nearest cluster. The for/else
    # terminates the while-loop once no odd cluster is found (the break
    # restarts iteration because `cluster` was mutated).
    unmatched_node = True
    while unmatched_node:
        for _ in cluster.keys():
            if len(cluster[_]
                   ) % 2 != 0 and _ != 'logical_0' and _ != 'logical_1':
                s = self.nearest_cluster(cluster, graph, _)
                if s[1] == 'logical_0' or s[1] == 'logical_1':
                    # Record which logical was touched ('0' or '1').
                    corrected_logical_string.append(s[1][-1])
                    cluster[_].append(s[0])
                else:
                    cluster[_] = cluster[_] + cluster[s[1]]
                    cluster.pop(s[1])
                break
        else:
            unmatched_node = False
    neutral_nodelist = []
    edgelist = []
    # Match each (now even-sized) cluster independently.
    for _ in cluster.keys():
        # Rebuild the full graph, then prune to this cluster's nodes.
        cluster_graph = rx.PyGraph()
        cluster_graph.add_nodes_from(graph.nodes())
        cluster_graph.add_edges_from(graph.weighted_edge_list())
        # NOTE(review): removal by enumeration index assumes node
        # indexes are contiguous 0..n-1 in nodes() order — confirm.
        for i, __ in enumerate(graph.nodes()):
            if __ not in cluster[_]:
                cluster_graph.remove_node(i)
        edges = [
            (cluster_graph[x[0]], cluster_graph[x[1]])
            for x in rx.max_weight_matching(
                cluster_graph, max_cardinality=True, weight_fn=lambda x: x)
        ]
        edgelist = edgelist + edges
        neutral_nodelist += [k[0] for k in list(edges)
                             ] + [k[1] for k in list(edges)]
    # use it to construct and return a corrected logical string
    logicals = self._separate_string(string)[0]
    for (source, target) in edgelist:
        # A matched edge touching a logical node (first element 0)
        # flips that logical bit.
        if source[0] == 0 and target[0] != 0:
            logicals[source[1]] = str((int(logicals[source[1]]) + 1) % 2)
        if target[0] == 0 and source[0] != 0:
            logicals[target[1]] = str((int(logicals[target[1]]) + 1) % 2)
    logical_string = ''
    for logical in logicals:
        logical_string += logical + ' '
    logical_string = logical_string[:-1]
    return [logical_string, edgelist, neutral_nodelist]
def test_single_self_edge(self):
    """A self-loop can never be part of a matching, so the result is empty."""
    graph = retworkx.PyGraph()
    graph.extend_from_weighted_edge_list([(0, 0, 100)])

    result = retworkx.max_weight_matching(graph)
    self.assertEqual(result, set())