def uniform(nsite, backend='numpy'):
    """Return the CP decomposition of the uniform superposition |+>^nsite.

    Every site holds the same (1, 2) factor (|0> + |1>)/sqrt(2); the factor
    object is shared across sites, matching the sibling constructors.
    """
    tb = tensorbackends.get(backend)
    plus = tb.astensor(
        np.array([1, 1], dtype=complex).reshape(1, 2) / np.sqrt(2))
    return CanonicalDecomp([plus] * nsite, tb)
def random(nsite, *, backend='numpy'):
    """Return a random, normalized nsite-qubit dense state vector.

    Real and imaginary parts are drawn uniformly from [-1, 1); the two
    backend RNG calls are made in that order (real first), then the tensor
    is normalized to unit norm.
    """
    tb = tensorbackends.get(backend)
    dims = (2, ) * nsite
    real_part = tb.random.uniform(-1, 1, dims)
    imag_part = tb.random.uniform(-1, 1, dims)
    tensor = real_part + 1j * imag_part
    tensor /= tb.norm(tensor)
    return StateVector(tensor, tb)
def computational_zeros(nrow, ncol, backend='numpy'):
    """Return the PEPS for |0...0> on an nrow x ncol grid.

    Each site tensor has shape (1, 1, 1, 1, 2, 1) — all virtual bonds of
    dimension 1 and the physical index set to |0>. A fresh tensor is
    created per site.
    """
    tb = tensorbackends.get(backend)
    grid = np.empty((nrow, ncol), dtype=object)
    for site in np.ndindex(nrow, ncol):
        grid[site] = tb.astensor(
            np.array([1, 0], dtype=complex).reshape(1, 1, 1, 1, 2, 1))
    return PEPS(grid, tb)
def __init__(self, factors, backend):
    """Wrap a list of CP factor matrices with a backend and fidelity bookkeeping.

    `prev_factors` tracks the previous iterate (none yet), and the fidelity
    estimates start at their exact initial values.
    """
    self.backend = tensorbackends.get(backend)
    self.factors = factors
    # No previous iterate yet; accumulated rotation angle starts at zero.
    self.prev_factors = None
    self.theta = 0.0
    self.fidelity_lower = 1.0
    self.fidelity_avg = 1.0
def get_7_nodes_conjecture_state(backend):
    """Build the 7-qubit conjecture state as a flat dense vector.

    The state is a fixed sum of 12 product terms over {|0>, |1>, |+>, |->},
    with coefficients +-1/(2*sqrt(2)) or -1/2. Terms are accumulated in the
    original order so floating-point results match exactly.
    """
    tb = tensorbackends.get(backend)
    zero = tb.astensor(np.asarray([1., 0.]))
    one = tb.astensor(np.asarray([0., 1.]))
    plus = tb.astensor(1. / np.sqrt(2) * np.asarray([1., 1.]))
    minus = tb.astensor(1. / np.sqrt(2) * np.asarray([1., -1.]))
    c = 1. / (2 * np.sqrt(2))  # coefficient of the sqrt-weighted terms
    d = -1. / 2                # coefficient of the negative-half terms
    terms = [
        (c, (minus, one, minus, zero, plus, zero, plus)),
        (d, (one, minus, zero, plus, zero, minus, one)),
        (d, (one, plus, one, plus, one, plus, one)),
        (c, (plus, zero, minus, one, minus, zero, plus)),
        (c, (plus, zero, minus, one, plus, one, minus)),
        (c, (plus, zero, plus, zero, minus, one, minus)),
        (d, (one, plus, one, minus, zero, minus, one)),
        (c, (minus, one, plus, one, minus, zero, plus)),
        (d, (one, minus, zero, minus, one, plus, one)),
        (c, (minus, one, plus, one, plus, one, minus)),
        (c, (plus, zero, plus, zero, plus, zero, plus)),
        (c, (minus, one, minus, zero, minus, one, minus)),
    ]
    out = None
    for coeff, vecs in terms:
        term = coeff * tb.einsum("a,b,c,d,e,f,g->abcdefg", *vecs)
        out = term if out is None else out + term
    return out.ravel()
def load(dirname):
    """Load a PEPS previously saved under *dirname*.

    Reads grid shape and backend name from 'koala_peps.json', then loads
    each site tensor from the file named '{row}_{col}'.
    """
    with open(os.path.join(dirname, 'koala_peps.json')) as file:
        meta = json.load(file)
    tb = tensorbackends.get(meta['backend'])
    nrow, ncol = meta['nrow'], meta['ncol']
    grid = np.empty((nrow, ncol), dtype=object)
    for i, j in np.ndindex(nrow, ncol):
        grid[i, j] = tb.load(os.path.join(dirname, f'{i}_{j}'))
    return PEPS(grid, tb)
def computational_basis(nrow, ncol, bits, backend='numpy'):
    """Return the PEPS for the computational basis state given by *bits*.

    *bits* is any array-like reshapeable to (nrow, ncol); a truthy entry
    selects |1> at that site, otherwise |0>. All virtual bonds are size 1.
    """
    tb = tensorbackends.get(backend)
    bit_grid = np.asarray(bits).reshape(nrow, ncol)
    grid = np.empty_like(bit_grid, dtype=object)
    for i, j in np.ndindex(nrow, ncol):
        local = [0, 1] if bit_grid[i, j] else [1, 0]
        grid[i, j] = tb.astensor(
            np.array(local, dtype=complex).reshape(1, 1, 1, 1, 2, 1))
    return PEPS(grid, tb)
def qft_inv_input(nsite, theta, backend='numpy'):
    """CP input state for the inverse QFT / phase estimation.

    Site i holds the (1, 2) factor (|0> + exp(i*2*pi*2^i*theta)|1>)/sqrt(2).
    """
    tb = tensorbackends.get(backend)
    factors = [
        tb.astensor(
            np.array([1, np.exp(1j * 2 * np.pi * 2**i * theta)],
                     dtype=complex).reshape(1, 2) / np.sqrt(2))
        for i in range(nsite)
    ]
    return CanonicalDecomp(factors, tb)
def rectangular_pulse(nsite, backend='numpy'):
    """CP state: |0> on the first half of sites, unnormalized |0>+|1> on the rest.

    Requires an even number of sites. A fresh tensor is created per site.
    NOTE(review): the [1, 1] factors are intentionally not scaled by 1/sqrt(2)
    here (unlike `uniform`) — presumably the pulse amplitude is wanted as-is;
    confirm against callers.
    """
    assert nsite % 2 == 0
    tb = tensorbackends.get(backend)
    half = nsite // 2
    factors = [tb.astensor(np.array([1, 0]).reshape(1, 2)) for _ in range(half)]
    factors += [tb.astensor(np.array([1, 1]).reshape(1, 2)) for _ in range(half)]
    return CanonicalDecomp(factors, tb)
def hx(nsite, backend='numpy'):
    """CP state with |+> on the first half of sites and |1> on the second half.

    Requires an even number of sites; factor objects are shared across sites
    within each half, matching `uniform`.
    """
    tb = tensorbackends.get(backend)
    assert nsite % 2 == 0
    half = nsite // 2
    plus = tb.astensor(
        np.array([1, 1], dtype=complex).reshape(1, 2) / np.sqrt(2))
    one = tb.astensor(np.array([0, 1], dtype=complex).reshape(1, 2))
    return CanonicalDecomp([plus] * half + [one] * half, tb)
def random(nrow, ncol, rank, physical_dim=2, dual_dim=1, backend='numpy'):
    """Return a random complex PEPS on an nrow x ncol grid.

    Interior virtual bonds have dimension *rank*; bonds pointing off the
    grid boundary have dimension 1. Site tensor axis order is
    (up, right, down, left, physical, dual). RNG call order (real part
    first, per site, row-major) is preserved.
    """
    tb = tensorbackends.get(backend)
    grid = np.empty((nrow, ncol), dtype=object)
    for i, j in np.ndindex(nrow, ncol):
        up = rank if i > 0 else 1
        right = rank if j < ncol - 1 else 1
        down = rank if i < nrow - 1 else 1
        left = rank if j > 0 else 1
        shape = (up, right, down, left, physical_dim, dual_dim)
        real_part = tb.random.uniform(-1, 1, shape)
        imag_part = tb.random.uniform(-1, 1, shape)
        grid[i, j] = real_part + 1j * imag_part
    return PEPS(grid, tb)
def complete_graph_input(nsite, backend='numpy'):
    """Build the CP input state for the complete-graph instance.

    The first half of the sites ("vertex" qubits) and the second half are
    paired: every bitstring of nsite/2 bits contributes one rank-1 term in
    which site i and site i + nsite/2 carry the same bit. A leading |+> term
    on every site (rank-1 component 0) is added first, and all later
    components of site 0's factor are negated. The result is a CP
    decomposition of rank 1 + 2^(nsite/2).
    """
    backend = tensorbackends.get(backend)
    assert nsite % 2 == 0
    nsite_vertex = nsite // 2
    shape = (1, 2)
    unit_vector = backend.astensor(
        np.array([1, 1], dtype=complex).reshape(shape) / np.sqrt(2))
    # Component 0 of every site's factor is the uniform |+> vector.
    factors = [[unit_vector] for _ in range(nsite)]
    for num in range(2**nsite_vertex):
        # Binary digits of `num`, most significant first (no leading zeros).
        list_binary_str = list(format(num, "b"))
        length = len(list_binary_str)
        # Leading (implicit-zero) positions: append |0>/sqrt(2) to both the
        # vertex site and its partner site.
        for i in range(nsite_vertex - length):
            factors[i].append(
                backend.astensor(
                    np.array([1, 0], dtype=complex).reshape(shape) /
                    np.sqrt(2)))
            # NOTE(review): this array has no dtype=complex, unlike its twin
            # above — np.vstack will upcast later, but confirm it is not an
            # oversight.
            factors[i + nsite_vertex].append(
                backend.astensor(np.array([1, 0]).reshape(shape) / np.sqrt(2)))
        # Explicit digits: |0> or |1> on site j and the same on its partner.
        for i in range(length):
            j = i + nsite_vertex - length
            if list_binary_str[i] == '0':
                factors[j].append(
                    backend.astensor(
                        np.array([1, 0], dtype=complex).reshape(shape) /
                        np.sqrt(2)))
                factors[j + nsite_vertex].append(
                    backend.astensor(
                        np.array([1, 0]).reshape(shape) / np.sqrt(2)))
            else:
                factors[j].append(
                    backend.astensor(
                        np.array([0, 1], dtype=complex).reshape(shape) /
                        np.sqrt(2)))
                factors[j + nsite_vertex].append(
                    backend.astensor(
                        np.array([0, 1]).reshape(shape) / np.sqrt(2)))
    # Negate every bitstring component (not the |+> component) on site 0,
    # giving the overall minus sign of the bitstring part of the sum.
    for i in range(1, len(factors[0])):
        factors[0][i] = -factors[0][i]
    # Stack each site's component list into a (rank, 2) factor matrix.
    for i in range(nsite):
        factors[i] = backend.vstack(tuple(factors[i]))
        assert factors[i].shape == (1 + 2**nsite_vertex, 2)
    return CanonicalDecomp(factors, backend)
def test_qft_with_full_rank(self, backend):
    """Check qft_candecomp against an ortho-normalized FFT reference."""
    nsite = 8  # maximum 14
    debug = False
    tb = tensorbackends.get(backend)
    qstate = candecomp.random(nsite=nsite, rank=1, backend=backend)
    reference = qstate.get_statevector()
    qft_candecomp(qstate, debug=debug)
    result = qstate.get_statevector()
    # Build the dense reference via FFT, unwrapping to numpy when the
    # backend tensor is a ctf tensor.
    raw = reference.unwrap()
    if isinstance(raw, np.ndarray):
        expected = tb.astensor(fft(reference.ravel(), norm="ortho"))
    elif isinstance(raw, ctf.core.tensor):
        expected = tb.astensor(
            fft(reference.ravel().to_nparray(), norm="ortho"))
    self.assertTrue(np.isclose(tb.norm(result.ravel() - expected), 0.))
def bipartite_uniform(nsite, backend='numpy'):
    """CP state for the bipartite walk: |0>, (nsite/2 - 1) |+>'s, |1>, (nsite/2 - 1) |+>'s.

    Requires an even number of sites; the |+> factor object is shared.
    """
    assert nsite % 2 == 0
    nsite_vertices = nsite // 2 - 1
    tb = tensorbackends.get(backend)
    plus = tb.astensor(
        np.array([1, 1], dtype=complex).reshape(1, 2) / np.sqrt(2))
    zero = tb.astensor(np.array([1, 0], dtype=complex).reshape(1, 2))
    one = tb.astensor(np.array([0, 1], dtype=complex).reshape(1, 2))
    factors = [zero] + [plus] * nsite_vertices + [one] + [plus] * nsite_vertices
    return CanonicalDecomp(factors, tb)
def als_w():
    """Sanity-run ALS refitting on a 4-fold W-state CP decomposition.

    Prints the input factors, refits them with ALS at rank 15, prints the
    refit factors, and finally prints the norm of the state-vector error.
    """
    tb = tensorbackends.get('numpy')
    factors = W_state(4)
    for factor in factors:
        print(factor)
    target = candecomp.CanonicalDecomp(factors, 'numpy').get_statevector()
    out_factors, _ = candecomp.als.als(factors,
                                       tb,
                                       15,
                                       tol=1e-14,
                                       max_iter=20000,
                                       inner_iter=20,
                                       init_als='random',
                                       num_als_init=100,
                                       debug=True)
    for factor in out_factors:
        print(factor)
    fitted = candecomp.CanonicalDecomp(out_factors, 'numpy').get_statevector()
    print(tb.norm(target - fitted))
def W_state(num_products):
    """Recursively build CP factors for the num_products-fold product of W states.

    The base case returns the 3-qubit W state as rank-3 factor matrices
    (rows are CP components). Each recursion triples the rank of the
    existing factors and appends three new sites whose rows are replicated
    to match the tripled rank, giving rank 3^num_products overall.
    """
    tb = tensorbackends.get('numpy')
    if num_products == 1:
        # Rank-3 CP decomposition of |100> + |010> + |001| (up to normalization).
        return [
            tb.asarray([[1. + 0j, 0.], [1., 0.], [0., 1.]]),
            tb.asarray([[1. + 0j, 0.], [0., 1.], [1., 0.]]),
            tb.asarray([[0. + 0j, 1.], [1., 0.], [1., 0.]]),
        ]
    factors = W_state(num_products=num_products - 1)
    factors_w = W_state(num_products=1)
    rank_prev = 3**(num_products - 1)
    # Triple the rank of every existing site factor.
    for i, factor in enumerate(factors):
        factors[i] = tb.vstack((factor, factor, factor))
    # Append the three new sites: each base-case row is repeated rank_prev
    # times so the new factors line up with the tripled rank.
    for i in range(3):
        blocks = [
            tb.vstack(tuple([factors_w[i][row] for _ in range(rank_prev)]))
            for row in range(3)
        ]
        factors.append(tb.vstack(tuple(blocks)))
    return factors
def __init__(self, tensor, backend):
    """Hold a dense state tensor together with its resolved tensor backend."""
    # Backend may be passed by name; resolve it to a backend object first.
    resolved = tensorbackends.get(backend)
    self.backend = resolved
    self.tensor = tensor
# NOTE(review): this is a fragment of a larger driver function — the
# enclosing `def` and the continuation of the qwalk_candecomp(...) call are
# outside this view. Code tokens left untouched.
debug = True
mode = "loop"
cpdmode = "direct"
# ALS arguments
rank_threshold = 2
cp_tol = 1e-8
cp_maxiter = 100
cp_inneriter = 20
num_als_init = 3
init_als = 'random'
assert nsite % 2 == 0
nsite_vertices = nsite // 2
tb = tensorbackends.get(backend)
# Build the marked states for the walk and their CP factor form.
marked_states = build_marked_states(nsite_vertices, mode)
print(marked_states)
marked_states_factors = [
    get_factors_from_state(state, tb) for state in marked_states
]
# Track peak memory of the quantum-walk simulation.
tracemalloc.start()
qstate = qwalk_candecomp(nsite,
                         backend=backend,
                         rank_threshold=rank_threshold,
                         cp_tol=cp_tol,
                         cp_maxiter=cp_maxiter,
def random(nsite, rank, backend='numpy'):
    """Return a random rank-`rank` CP state on `nsite` qubits."""
    tb = tensorbackends.get(backend)
    return CanonicalDecomp(initialize_random_factors(rank, nsite, tb), tb)
def computational_basis(nsite, bits, *, backend='numpy'):
    """Return the dense computational basis state |bits> on `nsite` qubits.

    *bits* is any array-like of nsite 0/1 entries; the matching amplitude
    is set to 1, all others are 0.
    """
    tb = tensorbackends.get(backend)
    index = tuple(np.asarray(bits).reshape(nsite))
    tensor = tb.zeros((2, ) * nsite, dtype=complex)
    tensor[index] = 1
    return StateVector(tensor, tb)
def basis(nsite, backend='numpy'):
    """Return the CP decomposition of |0...0> on `nsite` qubits.

    The single (1, 2) factor is shared across all sites. The factor is
    built with dtype=complex for consistency with the other constructors
    (`uniform`, `hx`, `qft_inv_input`, ...); the original integer-typed
    array could silently restrict later in-place complex arithmetic on
    some backends.
    """
    backend = tensorbackends.get(backend)
    shape = (1, 2)
    unit_vector = backend.astensor(
        np.array([1, 0], dtype=complex).reshape(shape))
    factors = [unit_vector for _ in range(nsite)]
    return CanonicalDecomp(factors, backend)
def computational_zeros(nsite, *, backend='numpy'):
    """Return |0...0> as a dense `nsite`-qubit state vector."""
    tb = tensorbackends.get(backend)
    state = tb.zeros((2, ) * nsite, dtype=complex)
    state[(0, ) * nsite] = 1
    return StateVector(state, tb)
def __init__(self, grid, backend):
    """Hold the 2-D object array of site tensors and its resolved backend."""
    # Backend may be passed by name; resolve it to a backend object first.
    resolved = tensorbackends.get(backend)
    self.backend = resolved
    self.grid = grid
def swap_local_pair_local_gram_qr_svd(state, x_pos, y_pos, rank):
    """Swap two neighboring PEPS sites via a local Gram-based QR + truncated SVD.

    For each site an R factor (and its pseudo-inverse) is obtained from the
    eigendecomposition of the site's Gram matrix; the combined R factors are
    SVD-truncated to `rank`, the singular values are split symmetrically,
    and the results are contracted back into `state.grid` in place.
    Site tensors appear to use the axis order
    (up, right, down, left, physical, dual) — TODO confirm.
    """
    # Choose einsum subscripts by the relative position of the two sites.
    if x_pos[0] < y_pos[0]:  # [x y]^T
        gram_x_subscripts = 'abcdxp,abCdXP->xpcXPC'
        gram_y_subscripts = 'cfghyq,CfghYQ->yqcYQC'
        xq_subscripts = 'abcdxp,xpci->abdi'
        yq_subscripts = 'cfghyq,yqcj->fghj'
        recover_x_subscripts = 'abcdxp,cxpsyq->absdyq'
        recover_y_subscripts = 'cfghyq,cyqsxp->sfghxp'
    elif x_pos[0] > y_pos[0]:  # [y x]^T
        gram_x_subscripts = 'abcdxp,AbcdXP->xpaXPA'
        gram_y_subscripts = 'efahyq,efAhYQ->yqaYQA'
        xq_subscripts = 'abcdxp,xpai->bcdi'
        yq_subscripts = 'efahyq,yqaj->efhj'
        recover_x_subscripts = 'abcdxp,axpsyq->sbcdyq'
        recover_y_subscripts = 'efahyq,ayqsxp->efshxp'
    elif x_pos[1] < y_pos[1]:  # [x y]
        gram_x_subscripts = 'abcdxp,aBcdXP->xpbXPB'
        gram_y_subscripts = 'efgbyq,efgBYQ->yqbYQB'
        xq_subscripts = 'abcdxp,xpbi->acdi'
        yq_subscripts = 'efgbyq,yqbj->efgj'
        recover_x_subscripts = 'abcdxp,bxpsyq->ascdyq'
        recover_y_subscripts = 'efgbyq,byqsxp->efgsxp'
    elif x_pos[1] > y_pos[1]:  # [y x]
        gram_x_subscripts = 'abcdxp,abcDXP->xpdXPD'
        gram_y_subscripts = 'edghyq,eDghYQ->yqdYQD'
        xq_subscripts = 'abcdxp,xpdi->abci'
        yq_subscripts = 'edghyq,yqdj->eghj'
        recover_x_subscripts = 'abcdxp,dxpsyq->abcsyq'
        recover_y_subscripts = 'edghyq,dyqsxp->esghxp'
    else:
        # Positions coincide or are not adjacent along an axis — unsupported.
        assert False
    numpy_backend = tensorbackends.get('numpy')

    def gram_qr_local(backend, a, gram_a_subscripts, q_subscripts):
        # Gram-matrix route to a QR-like factorization of the site tensor:
        # eigendecompose G = a^H a, then R = S^(1/2) V^H and
        # R^+ = V S^(-1/2) (pseudo-inverse via zeroed small singular values).
        # NOTE(review): `q_subscripts` is accepted but never used here —
        # presumably a leftover from a Q-forming variant; confirm.
        gram_a = backend.einsum(gram_a_subscripts, a.conj(), a)
        d1, d2, xi = gram_a.shape[:3]
        # local
        gram_a = gram_a.numpy().reshape(d1 * d2 * xi, d1 * d2 * xi)
        w, v = la.eigh(gram_a, overwrite_a=True)
        # Clip tiny negative eigenvalues from numerical noise before sqrt.
        s = np.clip(w, 0, None)**0.5
        s_pinv = np.divide(1, s, out=np.zeros_like(s), where=s != 0)
        r = np.einsum('j,ij->ji', s,
                      v.conj()).reshape(d1 * d2 * xi, d1, d2, xi)
        r_inv = np.einsum('j,ij->ij', s_pinv,
                          v).reshape(d1, d2, xi, d1 * d2 * xi)
        return numpy_backend.tensor(r), numpy_backend.tensor(r_inv)

    x, y = state.grid[x_pos], state.grid[y_pos]
    xr, xr_inv = gram_qr_local(state.backend, x, gram_x_subscripts,
                               xq_subscripts)
    yr, yr_inv = gram_qr_local(state.backend, y, gram_y_subscripts,
                               yq_subscripts)
    # Truncated SVD of the combined R factors down to the target bond rank.
    u, s, v = numpy_backend.einsumsvd('ixpk,jyqk->isyq,jsxp',
                                      xr,
                                      yr,
                                      option=ReducedSVD(rank))
    # Split singular values symmetrically between the two sides.
    s **= 0.5
    u = numpy_backend.einsum('xpki,isyq,s->kxpsyq', xr_inv, u, s)
    v = numpy_backend.einsum('yqkj,jsxp,s->kyqsxp', yr_inv, v, s)
    u = state.backend.astensor(u)
    v = state.backend.astensor(v)
    # Contract the update factors back into the original site tensors.
    state.grid[x_pos] = state.backend.einsum(recover_x_subscripts, x, u)
    state.grid[y_pos] = state.backend.einsum(recover_y_subscripts, y, v)
def apply_local_pair_operator_local_gram_qr_svd(state,
                                                operator,
                                                positions,
                                                rank,
                                                flip=False):
    """Apply a two-site operator to adjacent PEPS sites with local Gram QR + SVD.

    Each site is reduced through the eigendecomposition of its Gram matrix,
    the operator is absorbed during a truncated einsum-SVD down to `rank`,
    singular values are split symmetrically, and the updated tensors are
    written back into `state.grid` in place. `flip` selects the alternate
    index ordering of the site tensors (physical index before/after the
    operator index — TODO confirm the exact convention).
    """
    assert len(positions) == 2
    x_pos, y_pos = positions
    x, y = state.grid[x_pos], state.grid[y_pos]
    # Choose einsum subscripts by flip mode and the sites' relative position.
    if flip:
        if x_pos[0] < y_pos[0]:  # [x y]^T
            gram_x_subscripts = 'abcdpx,abCdpX->xcXC'
            gram_y_subscripts = 'cfghqy,CfghqY->ycYC'
            recover_x_subscripts = 'abcdpx,cxsu->absdpu'
            recover_y_subscripts = 'cfghqy,cysv->sfghqv'
        elif x_pos[0] > y_pos[0]:  # [y x]^T
            gram_x_subscripts = 'abcdpx,AbcdpX->xaXA'
            gram_y_subscripts = 'efahpy,efAhpY->yaYA'
            recover_x_subscripts = 'abcdpx,axsu->sbcdpu'
            recover_y_subscripts = 'efahqy,aysv->efshqv'
        elif x_pos[1] < y_pos[1]:  # [x y]
            gram_x_subscripts = 'abcdpx,aBcdpX->xbXB'
            gram_y_subscripts = 'efgbqy,efgBqY->ybYB'
            recover_x_subscripts = 'abcdpx,bxsu->ascdpu'
            recover_y_subscripts = 'efgbqy,bysv->efgsqv'
        elif x_pos[1] > y_pos[1]:  # [y x]
            gram_x_subscripts = 'abcdpx,abcDpX->xdXD'
            gram_y_subscripts = 'edghqy,eDghqY->ydYD'
            recover_x_subscripts = 'abcdpx,dxsu->abcspu'
            recover_y_subscripts = 'edghqy,dysv->esghqv'
        else:
            # Positions coincide or are not axis-adjacent — unsupported.
            assert False
    else:
        if x_pos[0] < y_pos[0]:  # [x y]^T
            gram_x_subscripts = 'abcdxp,abCdXp->xcXC'
            gram_y_subscripts = 'cfghyq,CfghYq->ycYC'
            recover_x_subscripts = 'abcdxp,cxsu->absdup'
            recover_y_subscripts = 'cfghyq,cysv->sfghvq'
        elif x_pos[0] > y_pos[0]:  # [y x]^T
            gram_x_subscripts = 'abcdxp,AbcdXp->xaXA'
            gram_y_subscripts = 'efahyq,efAhYq->yaYA'
            recover_x_subscripts = 'abcdxp,axsu->sbcdup'
            recover_y_subscripts = 'efahyq,aysv->efshvq'
        elif x_pos[1] < y_pos[1]:  # [x y]
            gram_x_subscripts = 'abcdxp,aBcdXp->xbXB'
            gram_y_subscripts = 'efgbyq,efgBYq->ybYB'
            recover_x_subscripts = 'abcdxp,bxsu->ascdup'
            recover_y_subscripts = 'efgbyq,bysv->efgsvq'
        elif x_pos[1] > y_pos[1]:  # [y x]
            gram_x_subscripts = 'abcdxp,abcDXp->xdXD'
            gram_y_subscripts = 'edghyq,eDghYq->ydYD'
            recover_x_subscripts = 'abcdxp,dxsu->abcsup'
            recover_y_subscripts = 'edghyq,dysv->esghvq'
        else:
            assert False
    numpy_backend = tensorbackends.get('numpy')

    def gram_qr_local(backend, a, gram_a_subscripts):
        # Gram-matrix route to a QR-like factorization: eigendecompose
        # G = a^H a, then R = S^(1/2) V^H and R^+ = V S^(-1/2) (pseudo-inverse
        # via zeroed small singular values).
        gram_a = backend.einsum(gram_a_subscripts, a.conj(), a)
        d, xi = gram_a.shape[:2]
        # local
        gram_a = gram_a.numpy().reshape(d * xi, d * xi)
        w, v = la.eigh(gram_a, overwrite_a=True)
        # Clip tiny negative eigenvalues from numerical noise before sqrt.
        s = np.clip(w, 0, None)**0.5
        s_pinv = np.divide(1, s, out=np.zeros_like(s), where=s != 0)
        r = np.einsum('j,ij->ji', s, v.conj()).reshape(d * xi, d, xi)
        r_inv = np.einsum('j,ij->ij', s_pinv, v).reshape(d, xi, d * xi)
        return numpy_backend.tensor(r), numpy_backend.tensor(r_inv)

    xr, xr_inv = gram_qr_local(state.backend, x, gram_x_subscripts)
    yr, yr_inv = gram_qr_local(state.backend, y, gram_y_subscripts)
    # Bring the operator to a local numpy tensor regardless of input type.
    operator = numpy_backend.tensor(
        operator if isinstance(operator, np.ndarray) else operator.numpy())
    # Absorb the operator and truncate to the target bond rank in one SVD.
    u, s, v = numpy_backend.einsumsvd('ixk,jyk,uvxy->isu,jsv',
                                      xr,
                                      yr,
                                      operator,
                                      option=ReducedSVD(rank))
    # Split singular values symmetrically between the two sides.
    s **= 0.5
    u = numpy_backend.einsum('xki,isu,s->kxsu', xr_inv, u, s)
    v = numpy_backend.einsum('ykj,jsv,s->kysv', yr_inv, v, s)
    u = state.backend.astensor(u)
    v = state.backend.astensor(v)
    # Contract the update factors back into the original site tensors.
    state.grid[x_pos] = state.backend.einsum(recover_x_subscripts, x, u)
    state.grid[y_pos] = state.backend.einsum(recover_y_subscripts, y, v)