Example #1
 def test_divide_update(
     self,
     size,
     X_dtype,
     c_dtype,
 ):
     X = qu.randn(size, dtype=X_dtype)
     Y = np.empty_like(X)
     c = qu.randn(1, dtype=c_dtype).item()
     res = X / c
     qu.core.divide_update_(X, c, Y)
     assert_allclose(res, Y, rtol=1e-6)
Example #2
 def test_subtract_update(
     self,
     size,
     X_dtype,
     c_dtype,
 ):
     X = qu.randn(size, dtype=X_dtype)
     Y = qu.randn(size, dtype=X_dtype)
     c = qu.randn(1, dtype=c_dtype).item()
     res = X - c * Y
     qu.core.subtract_update_(X, c, Y)
     assert_allclose(res, X)
Example #3
 def test_unitize(self, method):
     mera = qt.MERA.rand(16, dangle=True)
     assert mera.H @ mera == pytest.approx(2.0)
     for t in mera:
         t.modify(data=qu.randn(t.shape))
     assert mera.H @ mera != pytest.approx(2.0)
     mera.unitize_(method=method)
     assert mera.H @ mera == pytest.approx(2.0)
Example #4
def rand_rank(m, n, k, dtype=complex):
    s = np.sort(qu.randn(k)**2)[::-1]

    U = qu.gen.rand.rand_iso(m, k, dtype=dtype)
    VH = qu.gen.rand.rand_iso(n, k, dtype=dtype).conj().T

    if U.dtype in ('float32', 'complex64'):
        s = s.astype('float32')

    return usv2dense(U, s, VH)
Example #5
def test_vectorizer():
    from quimb.tensor.optimize_autograd import Vectorizer

    shapes = [(2, 3), (4, 5), (6, 7, 8)]
    dtypes = ['complex64', 'float32', 'complex64']
    arrays = [qu.randn(s, dtype=dtype) for s, dtype in zip(shapes, dtypes)]

    v = Vectorizer(arrays)

    grads = [qu.randn(s, dtype=dtype) for s, dtype in zip(shapes, dtypes)]
    v.pack(grads, 'grad')

    new_arrays = v.unpack(v.vector)
    for x, y in zip(arrays, new_arrays):
        assert_allclose(x, y)

    new_arrays = v.unpack(v.grad)
    for x, y in zip(grads, new_arrays):
        assert_allclose(x, y)
Example #6
    def test_subsystem(self):
        rho = qu.rand_rho(6)
        dims = [3, 2]
        I, X, Y, Z = (qu.pauli(s) for s in 'IXYZ')
        mi_i = qu.mutual_information(rho, dims)
        p = 0.1
        Ek = [(1 - p)**0.5 * I, (p / 3)**0.5 * X, (p / 3)**0.5 * Y,
              (p / 3)**0.5 * Z]

        with pytest.raises(ValueError):
            qu.kraus_op(rho,
                        qu.randn((3, 2, 2)),
                        check=True,
                        dims=dims,
                        where=1)

        sigma = qu.kraus_op(rho, Ek, check=True, dims=dims, where=1)
        mi_f = qu.mutual_information(sigma, dims)
        assert mi_f < mi_i
        assert qu.tr(sigma) == pytest.approx(1.0)
        sig_exp = sum(
            (qu.eye(3) & E) @ rho @ qu.dag(qu.eye(3) & E) for E in Ek)
        assert_allclose(sig_exp, sigma)
Example #7
def eigs_lobpcg(A,
                k,
                *,
                B=None,
                v0=None,
                which=None,
                return_vecs=True,
                sigma=None,
                isherm=True,
                P=None,
                sort=True,
                **lobpcg_opts):
    """Interface to scipy's lobpcg eigensolver, which can be good for
    generalized eigenproblems with matrix-free operators. Seems to a be a bit
    innacurate though (e.g. on the order of ~ 1e-6 for eigenvalues). Also only
    takes real, symmetric problems, targeting smallest eigenvalues (though
    scipy will soon have complex support, and its easy to add oneself).

    Note that the slepc eigensolver also has a lobpcg backend
    (``EPSType='lobpcg'``) which accepts complex input and is more accurate -
    though seems slower.

    Parameters
    ----------
    A : array_like, sparse_matrix, LinearOperator or callable
        The operator to solve for.
    k : int
        Number of eigenpairs to return.
    B : array_like, sparse_matrix, LinearOperator or callable, optional
        If given, the RHS operator (which should be positive) defining a
        generalized eigenproblem.
    v0 : array_like (d, k), optional
        The initial subspace to iterate with.
    which : {'SA', 'LA'}, optional
        Find the smallest or largest eigenvalues.
    return_vecs : bool, optional
        Whether to return the eigenvectors found.
    P : array_like, sparse_matrix, LinearOperator or callable, optional
        Perform the eigensolve in the subspace defined by this projector.
    sort : bool, optional
        Whether to ensure the eigenvalues are sorted in ascending value.
    lobpcg_opts
        Supplied to :func:`scipy.sparse.linalg.lobpcg`.

    Returns
    -------
    lk : array_like (k,)
        The eigenvalues.
    vk : array_like (d, k)
        The eigenvectors, if `return_vecs=True`.

    See Also
    --------
    eigs_scipy, eigs_numpy, eigs_slepc
    """
    if not isherm:
        raise ValueError("lobpcg can only solve symmetric problems.")

    if sigma is not None:
        raise ValueError("lobpcg can only solve extremal eigenvalues.")

    # remove invalid options for lobpcg
    lobpcg_opts.pop('ncv', None)
    lobpcg_opts.pop('EPSType', None)

    # convert some arguments and defaults
    lobpcg_opts.setdefault('maxiter', 30)
    if lobpcg_opts['maxiter'] is None:
        lobpcg_opts['maxiter'] = 30
    largest = {'SA': False, 'LA': True}[which]

    if isinstance(A, qu.Lazy):
        A = A()
    if isinstance(B, qu.Lazy):
        B = B()
    if isinstance(P, qu.Lazy):
        P = P()

    # project into subspace
    if P is not None:
        A = qu.dag(P) @ (A @ P)

    # avoid matrix like behaviour
    if isinstance(A, qu.qarray):
        A = A.A

    d = A.shape[0]

    # set up the initial subspace to iterate with
    if v0 is None:
        v0 = qu.randn((d, k), dtype=A.dtype)
    else:
        # check if initial space should be projected too
        if P is not None and v0.shape[0] != d:
            v0 = qu.dag(P) @ v0

        v0 = v0.reshape(d, -1)

        # if not enough initial states given, flesh out with random
        if v0.shape[1] != k:
            v0 = np.hstack(
                [v0, qu.randn((d, k - v0.shape[1]), dtype=A.dtype)])

    lk, vk = spla.lobpcg(A=A, X=v0, B=B, largest=largest, **lobpcg_opts)

    if return_vecs:
        vk = qu.qarray(vk)
        return maybe_sort_and_project(lk, vk, P, sort)
    else:
        return np.sort(lk) if sort else lk
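
For reference, a minimal usage sketch of the function above (assuming
``eigs_lobpcg`` is imported from this module and that ``qu.rand_herm``
accepts the arguments shown; this is illustrative only, not part of the
library's example code):

    import quimb as qu

    # small, real, symmetric test operator (lobpcg needs real symmetric input)
    A = qu.rand_herm(64, dtype=float)

    # four smallest eigenpairs
    lk, vk = eigs_lobpcg(A, k=4, which='SA')

    # eigenvalues only, from the top of the spectrum
    lk = eigs_lobpcg(A, k=4, which='LA', return_vecs=False)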
Example #8
 def test_scale_and_loc(self):
     x = qu.randn(1000, scale=100, loc=50, dtype=float, seed=42)
     assert_allclose(np.mean(x), 50, rtol=1e-1)
     assert_allclose(np.std(x), 100, rtol=1e-1)
Example #9
 def test_can_seed(self):
     assert_allclose(qu.randn(5, seed=42), qu.randn(5, seed=42))
Example #10
 def test_basic(self, dtype):
     x = qu.randn((2, 3, 4), dtype=dtype)
     assert x.shape == (2, 3, 4)
     assert x.dtype == np.dtype(dtype)