def multiple(size, iters, pE=0, pX=0, pZ=0, plot_load=False, qres=None, worker=0, seeds=None, config=None, **kwargs):
    """
    Runs the peeling decoder for a number of iterations. The graph is reused for speedup.
    """
    # Fall back to the default union-find decoder configuration.
    if config is None:
        config = decoder_config()

    # One deterministic seed per iteration for this worker, unless supplied.
    if seeds is None:
        seeds = [te.init_random_seed(worker=worker, iteration=it) for it in range(iters)]

    graph = go.init_toric_graph(size)

    # Decode once per (iteration, seed) pair; zip truncates to the shorter input.
    outcomes = []
    for it, seed in ProgIter(zip(range(iters), seeds)):
        outcomes.append(single(size, pE, pX, pZ, plot_load, graph, worker, it, seed, config))
    N_succes = sum(outcomes)

    # Report via the multiprocessing result queue, or return directly.
    if qres is None:
        return N_succes
    qres.put(N_succes)
def multiple(iters, size, p, worker=0, qres=None):
    """Run `single` for `iters` iterations on one shared toric graph.

    Results are returned directly, or put on `qres` when running as a
    multiprocessing worker.
    """
    graph = go.init_toric_graph(size)
    outcomes = []
    for iteration in ProgIter(range(iters)):
        outcomes.append(single(graph, p, iteration, worker))
    if qres is None:
        return outcomes
    qres.put(outcomes)
def multiple(size, iters, pX=0, qres=None, worker=None):
    """
    Runs the peeling decoder for a number of iterations. The graphs are reused for speedup.

    Returns (or puts on `qres`, when used as a multiprocessing worker) a
    defaultdict mapping each outcome key produced by `single` to the number
    of iterations that produced it.
    """
    # A missing worker id means a standalone (non-pooled) run; announce parameters.
    # Fix: compare to None with `is`, not `==` (PEP 8 identity comparison).
    if worker is None:
        print(f"L = {size}, p = {pX}")
        worker = 0
    # Two graphs so `single` can decode identical error patterns two ways.
    graph0 = go.init_toric_graph(size)
    graph1 = go.init_toric_graph(size)
    result = [
        single(size, pX, graph0, graph1, worker, i)
        for i in ProgIter(range(iters))
    ]
    # Tally how often each outcome key occurred.
    suc_count = dd(int)
    for key in result:
        suc_count[key] += 1
    if qres is not None:
        qres.put(suc_count)
    else:
        return suc_count
def single(size, pE=0, pX=0, pZ=0, plot_load=False, graph=None, worker=0, iter=0, seed=None, config=None, **kwargs): """ Runs the peeling decoder for one iteration """ # import uf config if config is None: config = decoder_config() # Initialize lattice if graph is None: graph = go.init_toric_graph(size) toric_plot = tp.lattice_plot(graph, **config.plot) if plot_load else None # Initialize errors te.init_random_seed(worker=worker, iteration=iter) if pE != 0: te.init_erasure_region(graph, pE, toric_plot, **config.file) # te.init_erasure(graph, pE, savefile, erasure_file, toric_plot=toric_plot, worker=worker) te.init_pauli(graph, pX, pZ, toric_plot, **config.file) else: te.init_random_seed(worker=worker, iteration=iter) te.init_erasure(graph, pE) te.init_pauli(graph, pX, pZ) graph.measure_stab() dc.get_matching_blossom5() if type == "planar": dc.remove_virtual() dc.apply_matching() _, correct = go.logical_error(graph) return correct
# Simulation parameters for this script run.
size = 20
pX = 0.09
pZ = 0.0
pE = 0.0


def countmatching(graph):
    # Count the edges currently flagged as part of the matching.
    count = 0
    for edge in graph.E.values():
        if edge.matching:
            count += 1
    return count


# Two lattices of the same size; errors below are applied to graph0 only.
# NOTE(review): `graph` is initialized but unused in the visible span —
# presumably used later in this (apparently truncated) script; confirm.
graph0 = go.init_toric_graph(size)
graph = go.init_toric_graph(size)
counter = 0
total = 0
# NOTE(review): `counter` is never incremented in the visible loop body, so
# this loop would not terminate as shown — the chunk looks truncated here.
while counter < 100:
    print(total)
    total += 1
    # Generate a seed, apply it, then sample a Pauli error pattern.
    seed = te.init_random_seed()
    te.apply_random_seed(seed)
    te.init_pauli(graph0, pX, pZ)
    # Measure stabilizers and run minimum-weight perfect matching.
    tc.measure_stab(graph0)
    matching = tc.get_matching_mwpm(graph0)
con, cur = sql_connection() print("\nGetting count of L{}, p{}...".format(lattice, p)) cur.execute( "SELECT tree_wins, list_wins FROM cases WHERE lattice = {} and p = {}" .format(lattice, p)) tlcount = cur.fetchone() countp[(lattice, p)] = [tlcount[0], tlcount[1]] cur.execute(fetch_query("COUNT(*)", lattice, p)) num = cur.fetchone()[0] print("fetching {} simulations...".format(num)) cur.execute(fetch_query("ftree_tlist, seed", lattice, p)) sims = [cur.fetchone()] graph = go.init_toric_graph(lattice) fetched = 1 while sims != [None]: print("{:0.1f}%".format(fetched / num * 100)) sims += cur.fetchmany(maxfetch - 1) fetched += maxfetch for type, seed in sims: # Get errors from seed te.apply_random_seed(seed) te.init_pauli(graph, pX=float(p)) n = len([
p = 0.1 limit = 10 ftree_tlist = True con, cur = sql_connection() query = fetch_query("comp_id, created_on, seed", L, p, extra="ftree_tlist = " + str(ftree_tlist), limit=limit) cur.execute(query) sims = cur.fetchall() cur.close() con.close() graph_t = go.init_toric_graph(L) graph_l = go.init_toric_graph(L) for comp_id, created_on, seed in sims: time = created_on.strftime("%Y-%m-%d_%H-%M-%S") winner = "list" if ftree_tlist else "tree" name = f"L{L}_p{p}_{comp_id}_{time}_{seed}_{winner}" fileh = logging.FileHandler(f"./logs/{name}.log") formatter = logging.Formatter("%(message)s") fileh.setFormatter(formatter) log = logging.getLogger() # root logger for hdlr in log.handlers[:]: # remove all old handlers log.removeHandler(hdlr) log.addHandler(fileh) pr.printlog(
def single(size, pX=0, graph0=None, graph1=None, worker=0, iter=0): """ Runs the peeling decoder for one iteration """ # size, pX, graph, worker, iter = 28, 0.09, None, 0, 0 # Initialize lattice if graph0 is None: graph0 = go.init_toric_graph(size) else: graph0.reset() if graph1 is None: graph1 = go.init_toric_graph(size) else: graph1.reset() seed = te.init_random_seed(worker=worker, iteration=iter) te.init_pauli(graph0, pX) te.apply_random_seed(seed) te.init_pauli(graph1, pX) tc.measure_stab(graph0) tc.measure_stab(graph1) uf0 = uf.cluster_farmer(graph0) uf1 = uf.cluster_farmer(graph1) uf0.find_clusters() uf1.find_clusters() # Analyze clusters after bucket 0 growth uf0.tree_grow_bucket(graph0.buckets[0], 0) cl0 = cca.get_support2clusters(graph0, size, minl, maxl) uf1.list_grow_bucket(graph1.buckets[0], 0) cl1 = cca.get_support2clusters(graph1, size, minl, maxl) clist, normc0, normc1 = [], [], [] for key, val in data_p.items(): clist.append(key) normc0.append(val[(size, pX)][0][0]) normc1.append(val[(size, pX)][0][1]) count0 = get_count(cl0, clist, size) count1 = get_count(cl1, clist, size) num0, num1 = countp[(size, pX)] val0 = sum([ d / (d + abs(c - d)**1.002) for d, c in [(n / num0, c) for n, c in zip(normc0, count0)] if d > 0 ]) val1 = sum([ d / (d + abs(c - d)**1) for d, c in [(n / num1, c) for n, c in zip(normc1, count1)] if d > 0 ]) choice = "tree" if val0 > val1 else "list" if choice == "tree": uf0.grow_clusters(method="tree", start_bucket=1) uf0.peel_clusters() graph = graph0 else: uf1.grow_clusters(method="list", start_bucket=1) uf1.peel_clusters() graph = graph1 # Measure logical operator logical_error = tc.logical_error(graph) correct = 1 if logical_error == [False, False, False, False] else 0 return correct, choice
# Measure logical operator graph_u.reset() graph_v.reset() if __name__ == "__main__": L = 12 p = 0.09 limit = 10 type = "vcomb" con, cur = sql_connection() query = fetch_query("*", p=p, L=L, type=type, limit=limit) cur.execute(query) sims = cur.fetchall() cur.close() con.close() graph_u = go.init_toric_graph(L) graph_v = go.init_toric_graph(L) for _, _, _, comp_id, created_on, ubuck_win, _, array in sims: print("Displaying sim by", comp_id, "created on", created_on) winner = "UBUCK" if ubuck_win else "VCOMB" print("Winner:", winner) plot_both(graph_u, graph_v, array)