def test_mppovm_embed_expectation(nr_sites, local_dim, rank, startsite, width,
                                  rgen):
    """Check that embedding a local MP-POVM into a larger chain is consistent.

    Builds a reduced Pauli POVM on ``width`` sites, embeds it into a
    ``nr_sites`` chain, and verifies that (a) tracing the embedded POVM
    back down reproduces the reduced POVM and (b) outcome probabilities
    computed from the full and reduced POVMs agree on a random MPO.

    :param nr_sites: Total number of sites of the chain
    :param local_dim: Local dimension (scalar or iterable, one per site)
    :param rank: Rank of the random test MPO
    :param startsite: First site covered by the reduced POVM
    :param width: Number of sites covered by the reduced POVM
    :param rgen: ``numpy.random.RandomState``-like generator
    """
    # Normalize local_dim to a per-site list, then pair each dimension with
    # itself to obtain operator-valued (row, column) leg dimensions.
    if hasattr(local_dim, '__iter__'):
        local_dim2 = local_dim
    else:
        local_dim2 = [local_dim] * nr_sites
    local_dim2 = list(zip(local_dim2, local_dim2))
    # Create a local POVM `red_povm`, embed it onto a larger chain
    # (`full_povm`), and go back to the reduced POVM.
    red_povm = mp.chain(
        mp.povm.MPPovm.from_local_povm(mp.povm.pauli_povm(d), 1)
        for d, _ in local_dim2[startsite:startsite + width]
    )
    full_povm = red_povm.embed(nr_sites, startsite, local_dim)
    # Trace out all sites outside [startsite, startsite + width).
    axes = [(1, 2) if i < startsite or i >= startsite + width else None
            for i in range(nr_sites)]
    red_povm2 = mp.partialtrace(full_povm, axes, mp.MPArray)
    red_povm2 = mp.prune(red_povm2, singletons=True)
    # Undo the trace normalization picked up from the embedded identity sites.
    red_povm2 /= np.prod([d for i, (d, _) in enumerate(local_dim2)
                          if i < startsite or i >= startsite + width])
    assert_almost_equal(mp.normdist(red_povm, red_povm2), 0.0)
    # Test with an arbitrary random MPO instead of an MPDO.
    # NB: `np.complex_` was removed in NumPy 2.0; use `np.complex128`.
    mpo = mp.factory.random_mpa(nr_sites, local_dim2, rank, rgen,
                                dtype=np.complex128, normalized=True)
    mpo_red = next(mp.reductions_mpo(mpo, width, startsites=[startsite]))
    ept = mp.prune(full_povm.pmf(mpo, 'mpdo'), singletons=True).to_array()
    ept_red = red_povm.pmf(mpo_red, 'mpdo').to_array()
    assert_array_almost_equal(ept, ept_red)
def run(seed):
    """Run one tensor-completion trial for the given RNG seed.

    Draws a random normalized MPS, measures it against randomly drawn
    rank-one MPAs, reconstructs it with :class:`AltminEstimator`, and
    returns the trial parameters together with the recovery error.

    :param seed: Seed for the pseudo-random number generator
    :returns: dict with the target state, the estimate, their distance,
        and the trial parameters (``C``, ``seed``, ``dim``, ``rank``,
        ``sites`` are module-level configuration — TODO confirm)
    """
    rand = numpy.random.RandomState(seed)
    target = mpnum.random_mpa(sites, dim, rank, randstate=rand,
                              normalized=True)
    # Number of measurements scales with the degrees of freedom of the MPS.
    n_meas = int(C * dim * sites * rank**2 * numpy.log2(rank + 1))
    sensing = [
        mpnum.random_mpa(len(target), target.pdims, 1, randstate=rand,
                         normalized=True, dtype=target.dtype)
        for _ in range(n_meas)
    ]
    outcomes = [inner_prod_mps(a, target) for a in sensing]
    estimator = AltminEstimator(sensing, outcomes, rank)
    recovered = estimator.estimate(maxiter, thresh=dist_crit)
    result = {
        'X': target,
        'X_sharp': recovered,
        'dist': mpnum.normdist(target, recovered),
        'C': C,
        'seed': seed,
        'dim': dim,
        'rank': rank,
        'sites': sites,
    }
    return result
def test_recover(sites, dim, rank):
    """Verify that alternating minimization recovers a random MPS.

    Draws a random normalized target MPS, takes ``5 * sites * rank**2 *
    dim`` inner-product measurements against random rank-one MPAs, runs
    :class:`AltminEstimator` for 10 iterations at twice the true rank,
    and asserts the recovery error is below ``1e-3``.
    """
    target = mp.random_mpa(sites, dim, rank, normalized=True)
    n_meas = 5 * sites * rank**2 * dim
    sensing = [mp.random_mpa(sites, dim, 1) for _ in range(n_meas)]
    data = [mp.special.inner_prod_mps(a, target) for a in sensing]
    # Take the 11th iterate (index 10) of the estimator's iteration.
    recovered = None
    for step, estimate in enumerate(AltminEstimator(sensing, data, 2 * rank)):
        if step == 10:
            recovered = estimate
            break
    assert mp.normdist(target, recovered) < 1e-3
def _convergence(propagator, start, step, stop, err, verbose):
    """
    Performs imaginary time evolution of the passed propagator until a
    convergence condition is met (absolute l2-norm difference between the
    states from successive timesteps is small enough).

    :param propagator: Propagator object exposing ``fast_evolve``,
        ``psi_t`` and ``info()``
    :param start: Step at which convergence checking begins
    :param step: Check convergence every ``step`` steps after ``start``
    :param stop: Maximum number of steps to perform
    :param err: Convergence threshold on ``mp.normdist`` between
        successive checked states
    :param verbose: Print step numbers and state ranks while evolving
    :returns: tuple of the final state ``propagator.psi_t`` and an info
        dict with keys ``'nof_steps'`` (steps performed) and ``'error'``
        (last measured distance, or ``None`` if never checked)
    """
    nof_steps = stop
    # Track convergence explicitly: previously this was inferred from
    # `nof_steps == stop`, which misreported non-convergence when the
    # threshold was reached exactly at the final step.
    converged = False
    if verbose:
        print('Starting propagation')
    # Warm-up phase: evolve up to `start` steps without convergence checks.
    for i in range(start - 1):
        propagator.fast_evolve(end=False)
        if verbose:
            print('Step {:d}:'.format(i + 1))
            print(propagator.psi_t.ranks)
    propagator.fast_evolve(end=True)
    if verbose:
        print('Step {:d}:'.format(start))
        print(propagator.psi_t.ranks)
    last_psi_t = propagator.psi_t.copy()
    check = None
    for i in range(1, (stop - start) + 1):
        if i % step == 0:
            # Checked step: finish the evolution step and compare with the
            # state from the previous check.
            propagator.fast_evolve(end=True)
            check = mp.normdist(last_psi_t, propagator.psi_t)
            if check < err:
                if verbose:
                    print('Step {:d}:'.format(i + start))
                    print(propagator.psi_t.ranks)
                nof_steps = i + start
                converged = True
                break
            else:
                last_psi_t = propagator.psi_t.copy()
        else:
            propagator.fast_evolve(end=False)
        if verbose:
            print('Step {:d}:'.format(i + start))
            print(propagator.psi_t.ranks)
    if not converged:
        print('Did not reach convergence in ' + str(nof_steps) + ' steps!')
    info = propagator.info()
    info['nof_steps'] = nof_steps
    info['error'] = check
    if verbose:
        print('Propagation finished')
    return propagator.psi_t, info