def in_support(H, keep_idxs, check=False):
    """Return the subspace of span(H) supported entirely on the columns keep_idxs.

    H: generator matrix (rows span the space of interest).
    keep_idxs: column indices allowed to be nonzero.
    check: when True, verify the result by brute-force span enumeration
        (exponential in the rank — debug only).
    Returns a generator matrix H1 with span(H1) = span(H) ∩ span(A), where A
    selects the keep_idxs coordinates.
    """
    n = H.shape[1]
    # NOTE: removed unused local `remove_idxs` (the complement of keep_idxs was
    # computed but never read).
    A = identity2(n)
    A = A[keep_idxs]
    H1 = intersect(A, H)
    if check:
        # brute-force: the set-intersection of both spans must equal span(H1)
        lhs = set(str(x) for x in span(A))
        rhs = set(str(x) for x in span(H))
        meet = lhs.intersection(rhs)
        assert meet == set(str(x) for x in span(H1))
    return H1
def distance(self):
    """Return (dx, dz): the minimum weight over the nontrivial X and Z cosets.

    For each sector, scans every stabilizer element combined with every
    logical representative and keeps the smallest nonzero weight; if nothing
    beats it, the result stays at self.n.
    """
    def _min_coset_weight(stabs, logicals, best):
        # materialize the logical span once; the stabilizer span streams
        coset = list(solve.span(logicals))
        for s in solve.span(stabs):
            for l in coset:
                weight = ((s + l) % 2).sum()
                if 0 < weight < best:
                    best = weight
        return best

    dx = _min_coset_weight(self.Hx, self.Lx, self.n)
    dz = _min_coset_weight(self.Hz, self.Lz, self.n)
    return dx, dz
def get_distance(self):
    """Return the minimum nonzero weight of any vector in span(self.G).

    Caches the result into self.d the first time (only when self.d is unset).
    Returns None if the span contains no nonzero vector.
    """
    best = None
    for vec in span(self.G):
        weight = vec.sum()
        if weight and (best is None or weight < best):
            best = weight
    if self.d is None:
        self.d = best
    return best
def z_weld(acode, bcode, pairs):
    """Weld two CSS codes along the qubit pairs in `pairs` (Z-type weld).

    NOTE(review): this function is work-in-progress and currently broken —
    the construction of both `Hx` and `Az` is commented out below, so the
    live loop `for z in span(Az)` raises NameError at runtime.
    """
    # dump both input codes for inspection
    for c in [acode, bcode]:
        print("Lx:")
        print(c.Lx)
        print("Lz:")
        print(c.Lz)
        print("Hx:")
        print(c.Hx)
        print("Hz:")
        print(c.Hz)
        print("-------------------")
    mx = acode.mx + bcode.mx
    #for (i, j) in pairs:
    assert len(set(pairs)) == len(pairs) # uniq
    # welded length: shared pairs are identified, counted once
    n = acode.n + bcode.n - len(pairs)
#    Hx = zeros2(mx, n)
#    Hx[:acode.mx, :acode.n] = acode.Hx
#    Hx[acode.mx:, acode.n-len(pairs):] = bcode.Hx
#
#    az = acode.mz + bcode.mz + acode.k + bcode.k
#    Az = zeros2(az, n)
#    r0, r1 = 0, acode.mz
#    Az[r0:r1, :acode.n] = acode.Hx; r0, r1 = r1, r1+len(acode.Hx)
#    Az[r0:r1, acode.n-len(pairs):] = bcode.Hx; r0, r1 = r1, r1+len(bcode.Hx)
##    Az[r0:r1, :acode.n] = acode.Lz; r0, r1 = r1, r1+len(acode.Lz)
##    #assert r1 == len(Az), (r1, len(Az))
##    Az[r0:r1, acode.n-len(pairs):] = bcode.Lz; r0, r1 = r1, r1+len(bcode.Lz)
#
#    print("Az:")
#    print(Az)
    #print(Az)
    # collect the vectors of span(Az) that commute with every row of Hx
    # NOTE(review): Az and Hx are undefined here (see commented block above).
    Hz = []
    for z in span(Az):
        #print(z)
        #print( dot2(Hx, z.transpose()))
        if dot2(Hx, z.transpose()).sum() == 0:
            Hz.append(z)
    Hz = array2(Hz)
    Hz = row_reduce(Hz)
    print("Hx:")
    print(Hx)
    print("Hz:")
    print(Hz)
def decode(self, p, err_op, verbose=False, **kw): from qupy.ldpc.metro import metropolis #print "decode:" #strop = self.strop #print strop(err_op) T = self.get_T(err_op) all_Lx = list(solve.span(self.Lx)) M0 = argv.get('M0', 10000) N0 = argv.get("N0", 1) best_l = None best_q = -self.n best_i = None best_T = None #print #print "T:" #print strop(T) for j in range(N0): for i, l_op in enumerate(all_Lx): #print "l_op:" #print strop(l_op) T1 = (T + l_op) % 2 # for h in self.Hx: # if random()<0.5: # T1 += h # T1 %= 2 #q = -self.metropolis(p, T1, M0) q = -metropolis(p, T1, M0, self.Hx) #write("(%d)"%q) #print "T %d:"%i #print strop(T1) if q > best_q: best_l = l_op best_q = q best_i = i best_T = T1 #print "_"*79 #write(":%d "%best_i) #print "best_T" #print strop(best_T) T += best_l T %= 2 #print "T" #print strop(T) return T
def main():
    """Command-line driver: build a model, derive logicals/reductors, and
    dispatch on argv flags to dense/sparse/orbigraph spectral analyses.

    Many branches pass `**locals()` to helpers, so local variable names here
    are part of an implicit interface — do not rename without checking
    dense(), dense_full(), show_delta() and slepc().
    """
    import models
    assert not argv.orbiham, "it's called orbigraph now"

    if argv.find_ideals:
        find_ideals()
        return

    Gx, Gz, Hx, Hz = models.build()

    if argv.chainmap:
        do_chainmap(Gx, Gz)

    if argv.symmetry:
        do_symmetry(Gx, Gz, Hx, Hz)
        return

    #print shortstrx(Gx, Gz)
    if argv.report:
        print("Hz:")
        for i, h in enumerate(Hz):
            print(i, shortstr(h), h.sum())
    #print shortstr(find_stabilizers(Gx, Gz))

    # logical operators for each sector
    Lz = find_logops(Gx, Hz)
    Lx = find_logops(Gz, Hx)
    #print "Lz:", shortstr(Lz)

    if Lz.shape[0]*Lz.shape[1]:
        print(Lz.shape, Gx.shape)
        check_commute(Lz, Gx)
        check_commute(Lz, Hx)

    Px = get_reductor(Hx) # projector onto complement of rowspan of Hx
    Pz = get_reductor(Hz)

    # reduce the gauge generators modulo the stabilizers
    Rz = [dot2(Pz, g) for g in Gz]
    Rz = array2(Rz)
    Rz = row_reduce(Rz, truncate=True)
    rz = len(Rz)

    n = Gx.shape[1]
    print("n =", n)
    if len(Lx):
        print("Lx Lz:")
        print(shortstrx(Lx, Lz))
    print("Hx:", len(Hx), "Hz:", len(Hz))
    print("Gx:", len(Gx), "Gz:", len(Gz))

    Rx = [dot2(Px, g) for g in Gx]
    Rx = array2(Rx)
    Rx = row_reduce(Rx, truncate=True)
    rx = len(Rx)
    print("Rx:", rx, "Rz:", rz)
    if argv.show:
        print(shortstrx(Rx, Rz))

    Qx = u_inverse(Rx)
    Pxt = Px.transpose()
    assert eq2(dot2(Rx, Qx), identity2(rx))
    assert eq2(dot2(Rx, Pxt), Rx)

    #print shortstr(dot2(Pxt, Qx))
    PxtQx = dot2(Pxt, Qx)
    lines = [shortstr(dot2(g, PxtQx)) for g in Gx]
    lines.sort()
    #print "PxtQx:"
    #for s in lines:
    #    print s
    #print "RzRxt"
    #print shortstr(dot2(Rz, Rx.transpose()))

    offset = argv.offset

    if len(Hz):
        Tx = find_errors(Hz, Lz, Rz)
    else:
        Tx = zeros2(0, n)

    # each of these helpers consumes the full local namespace
    if argv.dense:
        dense(**locals())
        return

    if argv.dense_full:
        dense_full(**locals())
        return

    if argv.show_delta:
        show_delta(**locals())
        return

    if argv.slepc:
        slepc(**locals())
        return

#    if argv.orbigraph:
#        from linear import orbigraph
#        orbigraph(**locals())
#        return

    v0 = None
#    excite = argv.excite
#    if excite is not None:
#        v0 = zeros2(n)
#        v0[excite] = 1

    # enumerate the projected span of Rx; `lookup` maps the byte image of a
    # vertex to its index so hopping by Gx generators can be resolved below
    verts = []
    lookup = {}
    for i, v in enumerate(span(Rx)): # XXX does not scale well
        if v0 is not None:
            v = (v+v0)%2
        v = dot2(Px, v)
        lookup[v.tobytes()] = i
        verts.append(v)
    print("span:", len(verts))
    assert len(lookup) == len(verts)

    mz = len(Gz)
    n = len(verts)

    if argv.lie:
        # only report the distribution of diagonal (potential) values
        U = []
        for i, v in enumerate(verts):
            count = dot2(Gz, v).sum()
            Pxv = dot2(Px, v)
            assert count == dot2(Gz, Pxv).sum()
            U.append(mz - 2*count)
        uniq = list(set(U))
        uniq.sort(reverse=True)
        s = ', '.join("%d(%d)"%(val, U.count(val)) for val in uniq)
        print(s)
        print("sum:", sum(U))
        return

    if n <= 1024 and argv.solve:
        # dense Hamiltonian: diagonal from Gz syndromes, off-diagonal from
        # hopping by each Gx generator
        H = numpy.zeros((n, n))
        syndromes = []
        for i, v in enumerate(verts):
            syndromes.append(dot2(Gz, v))
            count = dot2(Gz, v).sum()
            Pxv = dot2(Px, v)
            assert count == dot2(Gz, Pxv).sum()
            H[i, i] = mz - 2*count
            for g in Gx:
                v1 = (g+v)%2
                v1 = dot2(Px, v1)
                j = lookup[v1.tobytes()]
                H[i, j] += 1

        if argv.showham:
            s = lstr2(H, 0).replace(', ', ' ')
            s = s.replace(' 0', ' .')
            s = s.replace(', -', '-')
            print(s)

        vals, vecs = numpy.linalg.eigh(H)
        show_eigs(vals)

        if argv.show_partition:
            beta = argv.get("beta", 1.0)
            show_partition(vals, beta)

        if argv.orbigraph:
            if argv.symplectic:
                H1 = build_orbigraph(H, syndromes)
            else:
                H1 = build_orbigraph(H)
            print("orbigraph:")
            print(H1)
            vals, vecs = numpy.linalg.eig(H1)
            show_eigs(vals)

    elif argv.sparse:
        # sparse Hamiltonian as a dict-of-edges plus a potential vector
        print("building H", end=' ')
        A = {} # adjacency
        U = [] # potential

        if offset is None:
            offset = mz + 1 # make H positive definite

        for i, v in enumerate(verts):
            if i%1000==0:
                write('.')
            count = dot2(Gz, v).sum()
            #H[i, i] = mz - 2*count
            U.append(offset + mz - 2*count)
            for g in Gx:
                v1 = (g+v)%2
                v1 = dot2(Px, v1)
                j = lookup[v1.tobytes()]
                A[i, j] = A.get((i, j), 0) + 1

        print("\nnnz:", len(A))

        if argv.lanczos:
            vals, vecs = do_lanczos(A, U)

        elif argv.orbigraph:
            vals, vecs = do_orbigraph(A, U)

        else:
            return

        vals -= offset # offset doesn't change vecs
        show_eigs(vals)

    elif argv.orbigraph:
        # same dense build as above, but only used to form the orbigraph
        assert n<=1024
        H = numpy.zeros((n, n))
        syndromes = []
        for i, v in enumerate(verts):
            syndromes.append(dot2(Gz, v))
            count = dot2(Gz, v).sum()
            Pxv = dot2(Px, v)
            assert count == dot2(Gz, Pxv).sum()
            H[i, i] = mz - 2*count
            for g in Gx:
                v1 = (g+v)%2
                v1 = dot2(Px, v1)
                j = lookup[v1.tobytes()]
                H[i, j] += 1

        if argv.showham:
            s = lstr2(H, 0).replace(', ', ' ')
            s = s.replace(' 0', ' .')
            s = s.replace(', -', '-')
            print(s)

        if argv.symplectic:
            H1 = build_orbigraph(H, syndromes)
        else:
            H1 = build_orbigraph(H)
def __init__(self, code):
    """Set up the base Decoder, then precompute every X-logical operator."""
    Decoder.__init__(self, code)
    # materialize the full logical span once so decoding can re-enumerate it
    self.all_Lx = [op for op in solve.span(self.Lx)]
def __init__(self, code):
    """Set up the base Decoder, precompute X-logicals, and build the Hx Tanner graph."""
    Decoder.__init__(self, code)
    # full span of logical X operators, materialized once
    self.all_Lx = [op for op in solve.span(self.Lx)]
    # Tanner graph over the X-check matrix
    self.graph = Tanner(self.Hx)
def sparse_ham_eigs(self, excite=None, weights=None, Jx=1., Jz=1.):
    """Build the sparse gauge Hamiltonian over span(Rx) and return its
    largest-magnitude eigenvalues (offset removed), caching by arguments.

    excite: None, an int index into Tx, or a 0/1 tuple of len(Tx) selecting
        which pure errors to fold into the Z-syndrome (i.e. which sector).
    weights, Jx, Jz: part of the cache key only — NOTE(review): they are not
        otherwise used in this body; confirm whether that is intended.
    Returns a numpy array of eigenvalues (potentially cached).
    """
    key = str((excite, weights, Jx, Jz))
    if key in self.cache:
        return self.cache[key]

    Gx, Gz = self.Gx, self.Gz
    Rx, Rz = self.Rx, self.Rz
    Hx, Hz = self.Hx, self.Hz
    Tx, Tz = self.Tx, self.Tz
    Px, Pz = self.Px, self.Pz

    gz = len(Gz)
    r = len(Rx)
    n = self.n

    # normalize an integer excitation index into a one-hot tuple
    if type(excite) is int:
        _excite = [0] * len(Tx)
        _excite[excite] = 1
        excite = tuple(_excite)

    if excite is not None:
        assert len(excite) == len(Tx)
        # accumulate the selected pure errors, then take their Gz syndrome
        t = zeros2(n)
        for i, ex in enumerate(excite):
            if ex:
                t = (t + Tx[i]) % 2
        #print "t:", shortstr(t)
        Gzt = dot2(Gz, t)
    else:
        Gzt = 0

    # enumerate vertices of the state graph; lookup maps byte image -> index
    verts = []
    lookup = {}
    for i, v in enumerate(span(Rx)): # XXX does not scale well
        # FIX: use tobytes() — ndarray.tostring() was removed in NumPy 2.0
        # (and tobytes() is what the sibling code in main() already uses).
        lookup[v.tobytes()] = i
        verts.append(v)
    print("span:", len(verts))
    assert len(lookup) == len(verts)

    mz = len(Gz)
    n = len(verts)

    print("building H", end=' ')
    H = {} # adjacency
    U = [] # potential
    offset = mz + 1 # make H positive definite

    for i, v in enumerate(verts):
        if i % 1000 == 0:
            write('.')
        # diagonal: number of violated Gz constraints (shifted by excitation)
        syndrome = (dot2(Gz, v) + Gzt) % 2
        count = syndrome.sum()
        U.append(offset + mz - 2 * count)
        # off-diagonal: hop by each Gx generator, projected back via Px
        for g in Gx:
            v1 = (g + v) % 2
            v1 = dot2(Px, v1)
            j = lookup[v1.tobytes()]
            H[i, j] = H.get((i, j), 0) + 1
    print("\nnnz:", len(H))

    # fold the potential into the diagonal
    for i in range(len(U)):
        H[i, i] = H.get((i, i), 0) + U[i]

    N = len(U)
    del U

    # assemble COO triplets in sorted key order, then convert to CSR
    keys = list(H.keys())
    keys.sort()
    data = []
    rows = []
    cols = []
    for idx in keys:
        data.append(H[idx])
        rows.append(idx[0])
        cols.append(idx[1])
    del H

    H1 = sparse.coo_matrix((data, (rows, cols)), (N, N))
    H1 = sparse.csr_matrix(H1, dtype=numpy.float64)

    vals, vecs = sparse.linalg.eigsh(H1, k=min(N - 5, 40), which="LM")
    vals -= offset  # undo the positive-definite shift

    self.cache[key] = vals
    return vals
model = build_model() # um.... print(model) if argv.show: print("Hx/Hz:") print(shortstrx(model.Hx, model.Hz)) print() print("Gx/Gz:") print(shortstrx(Gx, Gz)) print() print("Lx/Lz:") print(shortstrx(model.Lx, model.Lz)) if len(model.Lx) and argv.distance: w = min([v.sum() for v in span(model.Lx) if v.sum()]) print("distance:", w) if argv.do_lp: model.do_lp() if argv.do_slepc: model.do_slepc() if argv.solve: vals = model.sparse_ham_eigs() print(vals) if argv.minweight: v = minweight(model.Hz) print("minweight:")
def find_triorth(m, k):
    """Randomized search for a tri-orthogonal generator matrix with m rows.

    Sets up the bi/tri-orthogonality constraints as a linear system over GF(2)
    in the column-indicator variables, finds one particular solution plus the
    kernel, then samples random kernel elements looking for the best (fewest
    columns) matrix G. Returns the even-weight rows of the best G found
    (after linear_independent), or None if no solution / no candidate.

    m: number of rows; k: number of odd-weight rows (None = unconstrained).
    """
    # Bravyi, Haah, 1209.2426v1 sec IX.
    # https://arxiv.org/pdf/1209.2426.pdf
    verbose = argv.get("verbose")
    #m = argv.get("m", 6) # _number of rows
    #k = argv.get("k", None) # _number of odd-weight rows

    # these are the variables N_x
    xs = list(cross([(0, 1)] * m))

    maxweight = argv.maxweight
    minweight = argv.get("minweight", 1)

    # restrict candidate columns by weight
    xs = [x for x in xs if minweight <= sum(x)]
    if maxweight:
        xs = [x for x in xs if sum(x) <= maxweight]

    N = len(xs)

    lhs = []
    rhs = []

    # bi-orthogonality: every pair of rows overlaps an even number of times
    for a in range(m):
        for b in range(a + 1, m):
            v = zeros2(N)
            for i, x in enumerate(xs):
                if x[a] == x[b] == 1:
                    v[i] = 1
            if v.sum():
                lhs.append(v)
                rhs.append(0)

    # tri-orthogonality: every triple of rows overlaps an even number of times
    for a in range(m):
        for b in range(a + 1, m):
            for c in range(b + 1, m):
                v = zeros2(N)
                for i, x in enumerate(xs):
                    if x[a] == x[b] == x[c] == 1:
                        v[i] = 1
                if v.sum():
                    lhs.append(v)
                    rhs.append(0)

#    # dissallow columns with weight <= 1
#    for i, x in enumerate(xs):
#        if sum(x)<=1:
#            v = zeros2(N)
#            v[i] = 1
#            lhs.append(v)
#            rhs.append(0)

    if k is not None:
        # constrain to k _number of odd-weight rows
        assert 0 <= k < m
        for a in range(m):
            v = zeros2(N)
            for i, x in enumerate(xs):
                if x[a] == 1:
                    v[i] = 1
            lhs.append(v)
            if a < k:
                rhs.append(1)
            else:
                rhs.append(0)

    A = array2(lhs)
    rhs = array2(rhs)
    #print(shortstr(A))

    # particular solution via pseudo-inverse; bail out if inconsistent
    B = pseudo_inverse(A)
    soln = dot2(B, rhs)
    if not eq2(dot2(A, soln), rhs):
        print("no solution")
        return

    if verbose:
        print("soln:")
        print(shortstr(soln))

    soln.shape = (N, 1)
    rhs.shape = A.shape[0], 1

    # kernel of A: the homogeneous solutions to randomize over
    K = array2(list(find_kernel(A)))
    #print(K)
    #print( dot2(A, K.transpose()))
    #sols = []
    #for v in span(K):
    best = None
    density = 1.0
    # NOTE(review): `size` is reused for two different quantities below —
    # first as the weight bound on candidate v, later as best G's column
    # count; this looks accidental but is behavior-affecting, so left as-is.
    size = 99 * N
    trials = argv.get("trials", 1024)
    count = 0
    for trial in range(trials):
        # random coset element: particular solution + random kernel vector
        u = rand2(len(K), 1)
        v = dot2(K.transpose(), u)
        #print(v)
        v = (v + soln) % 2
        assert eq2(dot2(A, v), rhs)

        if v.sum() > size:
            continue
        size = v.sum()

        # assemble the selected columns into G (rows=m, cols=selected xs)
        Gt = []
        for i, x in enumerate(xs):
            if v[i]:
                Gt.append(x)
        if not Gt:
            continue
        Gt = array2(Gt)
        G = Gt.transpose()
        assert is_morthogonal(G, 3)
        if G.shape[1] < m:
            continue
        if 0 in G.sum(1):
            continue
        if argv.strong_morthogonal and not strong_morthogonal(G, 3):
            continue
        #print(shortstr(G))

#        for g in G:
#            print(shortstr(g), g.sum())
#        print()

        _density = float(G.sum()) / (G.shape[0] * G.shape[1])
        #if best is None or _density < density:
        if best is None or G.shape[1] <= size:
            best = G
            size = G.shape[1]
            density = _density

        if 0:
            #sols.append(G)
            Gx = even_rows(G)
            assert is_morthogonal(Gx, 3)
            if len(Gx) == 0:
                continue
            GGx = array2(list(span(Gx)))
            assert is_morthogonal(GGx, 3)

        count += 1

    print("found %d solutions" % count)
    if best is None:
        return

    G = best
    #print(shortstr(G))

    for g in G:
        print(shortstr(g), g.sum())
    print()

    print("density:", density)
    print("shape:", G.shape)

    G = linear_independent(G)

    if 0:
        A = list(span(G))
        print(strong_morthogonal(A, 1))
        print(strong_morthogonal(A, 2))
        print(strong_morthogonal(A, 3))

    # keep only the even-weight rows
    G = [row for row in G if row.sum() % 2 == 0]
    return array2(G)

    # NOTE(review): everything below is unreachable (after the return above).
    #print(shortstr(dot2(G, G.transpose())))

    if 0:
        B = pseudo_inverse(A)
        v = dot2(B, rhs)
        print("B:")
        print(shortstr(B))
        print("v:")
        print(shortstr(v))
        assert eq2(dot2(B, v), rhs)
def build_all(self):
    """Materialize the spans of the X-logicals and X-stabilizers as lists."""
    self.all_Lx = [v for v in solve.span(self.Lx)]
    #self.all_Lz = list(solve.span(self.Lz))
    self.all_Hx = [v for v in solve.span(self.Hx)]
def test_stean():
    """Exploratory test: build a Steane-like chain complex and tensor it.

    NOTE(review): the first Hx assignment below is dead (immediately
    overwritten), and everything after the first `return` is unreachable —
    this reads as a scratch/experiment function kept for reference.
    """
    Hx = parse("""
...1111
.11..11
1.1.1.1
1.11.1.
.1111..
""")
    Hx = parse("""
...1111
.11..11
1.1.1.1
""")
    Hz = parse("""
...1111
.11..11
1.1.1.1
""")
    # keep only the nonzero vectors of span(Hz)
    A = list(a for a in solve.span(Hz) if a.sum())
    A = array2(A)
    print(shortstr(A))
    Hx = A
    Hz = Hx.copy()
    X = Chain([Hx, Hz.transpose()])
    X.check()

#    for i in range(-1, 3):
#        print "homology", i
#        print shortstr(X.homology(i))
#    X.dumpcodes()
#    print
#    print "=========="*10
#    print

    XX = X.tensor(X)
    XX.dumpcodes()
    return

    # NOTE(review): unreachable from here on
    for L in XX.Ls:
        print(L.shape)

    code = XX.get_code(1)
    print(code)
    print(code.weightsummary())
    #code.save("stean2_147_33.ldpc")
    return

    #XX = XX.tensor(X)
    XX.dumpcodes()
    return

    #code = XX.get_code(1)
    #code.save('stean2.ldpc')

    X4 = XX.tensor(XX)
    X4.dumpcodes()
def bruhat(): n = argv.get("n", 4) assert n % 2 == 0, repr(n) m = argv.get("m", 2) q = argv.get("q", 2) # symplectic form A = mk_form(n, q) # all non-zero vectors vals = list(range(q)) vecs = list(cross((vals, ) * n)) assert sum(vecs[0]) == 0 vecs.pop(0) # find unique spaces spaces = set() for U in cross_upper(vecs, m): U = numpy.array(U) U.shape = m, n B = numpy.dot(U, numpy.dot(A, U.transpose())) % q if B.max(): continue space = Space(U, q) if space.m != m: continue spaces.add(space) if 0: #space = [str(v) for v in span(U)] # SLOW space = [v.tostring() for v in span(U)] # q==2 only if len(space) != q**m: continue space.sort() #space = ''.join(space) #print(space) space = tuple(space) spaces.add(space) N = len(spaces) print("points:", N) if argv.verbose: for X in spaces: print(X) B = list(borel_sp(n, q)) print("borel:", len(B)) assert len(B) spaces = list(spaces) lookup = dict((space, i) for (i, space) in enumerate(spaces)) orbits = list(set([space]) for space in spaces) perms = [] for g in B: perm = [] for i, space in enumerate(spaces): U = numpy.dot(space.U, g) % q t = Space(U, q) #if t not in lookup: # print(space) # print(t) perm.append(lookup[t]) perms.append(perm) print(".", end=" ", flush=True) print() remain = set(range(N)) orbits = [] while remain: i = iter(remain).__next__() remain.remove(i) orbit = [i] for perm in perms: j = perm[i] if j in remain: remain.remove(j) orbit.append(j) orbits.append(orbit) orbits.sort(key=len) print("%d orbits:" % len(orbits)) for orbit in orbits: print("size =", len(orbit)) for idx in orbit: space = spaces[idx] U = space.U if q == 2: U = row_reduce(U)