def test_gradient_and_hessian2():
    num_sites = 2
    basis = qy.cluster_basis(np.arange(1, num_sites + 1), np.arange(num_sites),
                             'Pauli')

    args = {
        'dbasis': 10,
        'xtol': 1e-3,
        'num_expansions': 2,
        'verbose': True,
        'global_op_type': 'Pauli'
    }

    # Pick a random Hamiltonian in the basis.
    np.random.seed(42)
    num_trials_H = 2
    num_trials_tau = 2
    for ind_trial_H in range(num_trials_H):
        H = qy.Operator(2.0 * np.random.rand(len(basis)) - 1.0, basis)

        # Pick a random vector in the basis.
        for ind_trial_tau in range(num_trials_tau):
            tau = 2.0 * np.random.rand(len(basis)) - 1.0
            tau /= nla.norm(tau)
            initial_op = qy.Operator(tau, basis)

            [op, com_norm, binarity,
             results_data] = bioms.find_binary_iom(H,
                                                   initial_op,
                                                   args,
                                                   _check_derivatives=True)
Example 2
def test_com_norm_binarity1():
    # Check that the commutator norm and binarity
    # found by find_binary_iom() are accurate.
    # Each call to find_binary_iom is independent:
    # no explored data is reused.

    np.random.seed(43)

    # Number of random Hamiltonians to check.
    num_trials = 10

    # Identity operator.
    I = qy.Operator([1.], [qy.opstring('I', 'Pauli')])

    # Create random 1D Heisenberg Hamiltonians.
    for ind_trial in range(num_trials):
        L = 5
        J_xy = 1.0
        J_z = 1.0

        # Random disorder strength.
        W = 10.0 * np.random.rand()

        # The XXZ chain.
        H = bioms.xxz_chain(L, J_xy, J_z)
        # Perturbing magnetic fields.
        H += bioms.magnetic_fields(W * (2.0 * np.random.rand(L) - 1.0))

        # Start with a single Z at center.
        initial_op = qy.Operator([1.], [qy.opstring('Z {}'.format(L // 2))])

        print('H = \n{}'.format(H))
        print('initial_op = \n{}'.format(initial_op))

        # Allow the basis to expand.
        args = {
            'dbasis': 10,
            'xtol': 1e-6,
            'num_expansions': 4,
            'verbose': True,
            'global_op_type': 'Pauli'
        }

        [op, com_norm, binarity,
         results_data] = bioms.find_binary_iom(H,
                                               initial_op,
                                               args,
                                               _check_quantities=True)

        op *= 1.0 / op.norm()

        # Check the commutator norm and binarity of the result.
        com_norm_check = qy.commutator(H, op).norm()**2.0
        binarity_check = (0.5 * qy.anticommutator(op, op) - I).norm()**2.0

        assert (np.abs(com_norm - com_norm_check) < 1e-12)
        assert (np.abs(binarity - binarity_check) < 1e-12)
def two_site_ham1(ai, bi, aj, bj, i, j):
    # Two-site Hamiltonian that commutes with
    #     \alpha_i a_i + \alpha_j a_j
    # and \beta_i b_i + \beta_j b_j

    # An equivalent (unnormalized) choice of coefficients also works:
    """
    coeffs    = [ai*bj+aj*bi,
                 ai*bj-aj*bi,
                 -2.0*aj*bj,
                 -2.0*ai*bi]
    """

    coeffs     = [1.0 + (aj/ai)/(bj/bi),
                  1.0 - (aj/ai)/(bj/bi),
                  -2.0*(aj/ai),
                  -2.0*(bi/bj)]

    op_strings = [qy.opstring('CDag {} C {}'.format(i,j)),
                  qy.opstring('CDag {} CDag {}'.format(i,j)),
                  qy.opstring('CDag {} C {}'.format(i,i)),
                  qy.opstring('CDag {} C {}'.format(j,j))]

    norm = (1.0 + (aj/ai)/(bj/bi))

    result = qy.convert(qy.Operator(np.array(coeffs)/norm, op_strings), 'Majorana')
    #print(result)
    
    return result
Example 4
def magnetic_fields(potentials):
    """Construct a magnetic fields operator H = 1/2 \\sum_j h_j Z_j from the specified potentials h_j. 
    
    Parameters
    ----------
    potentials : list or ndarray
        The potentials h_j.
    
    Returns
    -------
    qosy.Operator
        The Hamiltonian representing the magnetic fields.
    
    Examples
    --------
    Build a 5-site disordered Heisenberg chain:
        >>> import numpy as np
        >>> W = 6.0 # Disorder strength
        >>> H = xxz_chain(5, 1.0, 1.0) + W * magnetic_fields(2.0*np.random.rand(5) - 1.0)
    """

    N = len(potentials)

    coeffs = 0.5 * potentials
    op_strings = [qy.opstring('Z {}'.format(site)) for site in range(N)]

    H = qy.Operator(coeffs, op_strings)

    return H
Example 5
def number_ops(H, num_orbitals):
    """Construct the number operators that diagonalize the given
    quadratic fermionic tight-binding Hamiltonian.
    
    Parameters
    ----------
    H : qosy.Operator
        The quadratic tight-binding Hamiltonian.
    num_orbitals : int
        The number of orbitals (sites) in the model.
    
    Returns
    -------
    (list of qosy.Operator, ndarray)
        The number operators that commute with H and the corresponding
        single-particle energies of the tight-binding Hamiltonian.
    """

    H_tb = qy.convert(H, 'Fermion')
    H_tb = H_tb.remove_zeros(tol=1e-15)
    (evals, evecs) = qy.diagonalize_quadratic_tightbinding(H_tb, num_orbitals)

    N = num_orbitals

    # NOTE: Number operator construction assumes
    # that the tight-binding Hamiltonian has only real coefficients.
    assert (np.allclose(np.imag(evecs), np.zeros_like(evecs)))
    evecs = np.real(evecs)

    num_ops = []
    for ind_ev in range(N):
        coeffs = []
        op_strings = []
        for orb in range(N):
            coeffs.append(np.abs(evecs[orb, ind_ev])**2.0)
            op_strings.append(qy.opstring('D {}'.format(orb)))

        for orb1 in range(N):
            for orb2 in range(orb1 + 1, N):
                coeffs.append(-evecs[orb1, ind_ev] * evecs[orb2, ind_ev])
                op_strings.append(
                    qy.opstring('1j A {} B {}'.format(orb1, orb2)))

                coeffs.append(+evecs[orb1, ind_ev] * evecs[orb2, ind_ev])
                op_strings.append(
                    qy.opstring('1j B {} A {}'.format(orb1, orb2)))

        num_op = qy.Operator(coeffs, op_strings, 'Majorana')

        num_ops.append(num_op)

    return (num_ops, evals)
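
# A minimal usage sketch for number_ops (not from the library; it mirrors the tests
# below and assumes the import aliases used throughout these snippets:
# numpy as np, qosy as qy, and bioms).  It builds a disordered non-interacting
# XX chain and checks that each returned operator (approximately) commutes
# with the Hamiltonian.
import numpy as np
import qosy as qy
import bioms

L = 5
H_tb = bioms.xxz_chain(L, 1.0, 0.0)  # J_z = 0: a quadratic (tight-binding) Hamiltonian.
H_tb += bioms.magnetic_fields(2.0 * np.random.rand(L) - 1.0)

(num_ops, energies) = bioms.number_ops(H_tb, L)

H_maj = qy.convert(H_tb, 'Majorana')
for num_op in num_ops:
    # Each commutator norm should be numerically zero.
    print(qy.commutator(H_maj, num_op).norm())
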
Example 6
def xxz_chain(L, J_xy, J_z, periodic=False):
    """Construct a 1D XXZ Hamiltonian H = 1/4 \sum_<ij> [J_xy (X_i X_j + Y_i Y_j) + J_z Z_i Z_j]
    
    Parameters
    ----------
    L : int
        The length of the chain.
    J_xy : float
        The coefficient in front of the exchange term.
    J_z : float
        The coefficient in front of the Ising term.
    periodic : bool, optional
        Specifies whether the model is periodic. Defaults to False.
    
    Returns
    -------
    qosy.Operator
        The Hamiltonian.
    
    Examples
    --------
    Build a 5-site Heisenberg chain:
        >>> H = xxz_chain(5, 1.0, 1.0)
    """

    coeffs = []
    op_strings = []
    for site in range(L):
        if site == L - 1 and not periodic:
            continue
        sitep = (site + 1) % L

        s1 = np.minimum(site, sitep)
        s2 = np.maximum(site, sitep)

        for orb in ['X', 'Y', 'Z']:
            if orb in ['X', 'Y']:
                coeffs.append(0.25 * J_xy)
            else:
                coeffs.append(0.25 * J_z)
            op_strings.append(
                qy.opstring('{} {} {} {}'.format(orb, s1, orb, s2)))

    H = qy.Operator(coeffs, op_strings)

    return H
Example 7
def test_find_biom():
    # Test that the Newton-CG optimization works
    # by finding a non-interacting integral of
    # motion without performing any basis expansions.

    np.random.seed(42)

    # Number of random Hamiltonians to check.
    num_trials = 10

    # Create random non-interacting Hamiltonians.
    for ind_trial in range(num_trials):
        L = 5
        J_xy = 1.0
        J_z = 0.0

        # Random disorder strength.
        W = 10.0 * np.random.rand()

        # The XXZ chain.
        H_tb = bioms.xxz_chain(L, J_xy, J_z)
        # Perturbing magnetic fields.
        H_tb += bioms.magnetic_fields(W * (2.0 * np.random.rand(L) - 1.0))

        # Find the fermion parity operator integral of motion
        # of the tight-binding Hamiltonian.
        [num_ops, energies] = bioms.number_ops(H_tb, L)
        #print('number operators = ')
        #qy.print_operators(num_ops)

        # Initial operator is a D at the center site.
        coeffs = [1.0]
        op_strings = [qy.opstring('D {}'.format(L // 2))]
        for site1 in range(L):
            for site2 in range(site1, L):
                if site1 != site2:
                    coeffs.append(0.0)
                    op_strings.append(
                        qy.opstring('1j A {} B {}'.format(site1, site2)))
                    coeffs.append(0.0)
                    op_strings.append(
                        qy.opstring('1j B {} A {}'.format(site1, site2)))
                else:
                    if site1 != L // 2:
                        coeffs.append(0.0)
                        op_strings.append(qy.opstring('D {}'.format(site1)))

        initial_op = qy.Operator(coeffs, op_strings)

        args = {
            'dbasis': 0,
            'xtol': 1e-8,
            'num_expansions': 1,
            'verbose': True,
            'global_op_type': 'Majorana'
        }

        [op, com_norm, binarity,
         results_data] = bioms.find_binary_iom(H_tb, initial_op, args)

        op *= 1.0 / op.norm()
        op = qy.convert(op, 'Majorana')

        # Check that the found operator is +/- one of the fermion parity operators.
        found_op = False
        for num_op in num_ops:
            diff1 = (num_op - op).norm()
            diff2 = (num_op + op).norm()
            print('diffs = {}, {}'.format(diff1, diff2))
            found_op = (diff1 < 1e-7) or (diff2 < 1e-7)
            if found_op:
                break

        assert (found_op)
        assert (com_norm < 1e-12)
        assert (binarity < 1e-12)

        # Check the commutator norm and binarity of the result.
        H = qy.convert(H_tb, 'Majorana')
        I = qy.Operator([1.], [qy.opstring('I', 'Majorana')])
        com_norm_check = qy.commutator(H, op).norm()**2.0
        binarity_check = (0.5 * qy.anticommutator(op, op) - I).norm()**2.0

        assert (np.abs(com_norm - com_norm_check) < 1e-12)
        assert (np.abs(binarity - binarity_check) < 1e-12)
Example 8
def test_binary_op_ED2():
    # Find an approximately binary operator on five sites
    # obtained after a few basis expansions.
    # Check with ED that the commutator norm and binarity
    # agree with the ED estimates.

    # Allow the basis to expand.
    args = {
        'dbasis': 10,
        'xtol': 1e-12,
        'num_expansions': 2,
        'verbose': True,
        'global_op_type': 'Pauli'
    }

    args['explored_basis'] = qy.Basis()
    args['explored_extended_basis'] = qy.Basis()
    args['explored_com_data'] = dict()
    args['explored_anticom_data'] = dict()

    # Also check that the "local Hamiltonian" function in find_binary_iom()
    # is working correctly with expansions and finding an IOM efficiently.
    L = 11
    basis = qy.cluster_basis(np.arange(1, 3 + 1),
                             np.arange(L // 2 - 1, L // 2 + 2), 'Pauli')

    np.random.seed(46)

    # Number of random Hamiltonians to check.
    num_trials = 2

    # Identity operator.
    I = qy.Operator([1.], [qy.opstring('I', 'Pauli')])

    # Create random 1D Heisenberg Hamiltonians.
    for ind_trial in range(num_trials):
        J_xy = 1.0
        J_z = 1.0

        # Random disorder strength.
        W = 10.0 * np.random.rand()

        # The XXZ chain.
        H = bioms.xxz_chain(L, J_xy, J_z)
        # Perturbing magnetic fields.
        H += bioms.magnetic_fields(W * (2.0 * np.random.rand(L) - 1.0))

        # Start with a single Z at center.
        Z_center = qy.opstring('Z {}'.format(L // 2))
        ind_Z_center = basis.index(Z_center)
        coeffs = np.zeros(len(basis), dtype=complex)
        coeffs[ind_Z_center] = 1.0
        initial_op = qy.Operator(coeffs, basis)

        print('H = \n{}'.format(H))
        print('initial_op = \n{}'.format(initial_op))
        [op, com_norm, binarity,
         results_data] = bioms.find_binary_iom(H,
                                               initial_op,
                                               args,
                                               _check_quantities=True)

        op *= 1.0 / op.norm()

        # Check the commutator norm and binarity of the result.
        com_norm_check = qy.commutator(H, op).norm()**2.0
        binarity_check = (0.5 * qy.anticommutator(op, op) - I).norm()**2.0

        assert (np.abs(com_norm - com_norm_check) < 1e-12)
        assert (np.abs(binarity - binarity_check) < 1e-12)

        # Perform ED on the operator.
        op_mat = qy.to_matrix(op, L).toarray()
        (evals, evecs) = nla.eigh(op_mat)

        print('Operator eigenvalues: {}'.format(evals))

        H_mat = qy.to_matrix(H, L).toarray()

        binarity_ED = np.sum(np.abs(np.abs(evals)**2.0 - 1.0)**
                             2.0) / op_mat.shape[0]
        com_ED = np.dot(H_mat, op_mat) - np.dot(op_mat, H_mat)
        com_norm_ED = np.real(np.trace(np.dot(np.conj(com_ED.T),
                                              com_ED))) / com_ED.shape[0]
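
        # Both ED estimates above divide by the Hilbert-space dimension
        # op_mat.shape[0] = 2^L.  The asserts below rely on qosy's
        # Operator.norm() matching this convention, i.e.
        # |A|^2 = Tr(A^dagger A) / 2^L for Pauli-string operators.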

        print('com_norm       = {}'.format(com_norm))
        print('com_norm_check = {}'.format(com_norm_check))
        print('com_norm_ED    = {}'.format(com_norm_ED))

        print('binarity       = {}'.format(binarity))
        print('binarity_check = {}'.format(binarity_check))
        print('binarity_ED    = {}'.format(binarity_ED))

        # Check the commutator norm and binarity against the ED estimates.
        assert (np.abs(com_norm - com_norm_ED) < 1e-12)
        assert (np.abs(binarity - binarity_ED) < 1e-12)
Example 9
def test_truncation_op_type():
    # Check that the commutator norm and binarity
    # found by find_binary_iom() are accurate.
    # In this test, we check that setting a non-zero
    # truncation size for the [H, [H, tau]] basis expansion
    # and trying different op_types both work correctly.

    np.random.seed(1)

    for global_op_type in ['Pauli', 'Majorana']:
        args = {
            'dbasis': 10,
            'xtol': 1e-6,
            'num_expansions': 4,
            'verbose': True,
            'global_op_type': global_op_type,
            'truncation_size': 5
        }

        args['explored_basis'] = qy.Basis()
        args['explored_extended_basis'] = qy.Basis()
        args['explored_com_data'] = dict()
        args['explored_anticom_data'] = dict()

        # Number of random Hamiltonians to check.
        num_trials = 5

        # Identity operator.
        I = qy.Operator([1.], [qy.opstring('I', 'Pauli')])
        I = qy.convert(I, global_op_type)

        # Create random 1D Heisenberg Hamiltonians.
        for ind_trial in range(num_trials):
            L = 5
            J_xy = 1.0
            J_z = 1.0

            # Random disorder strength.
            W = 10.0 * np.random.rand()

            # The XXZ chain.
            H = bioms.xxz_chain(L, J_xy, J_z)
            # Perturbing magnetic fields.
            H += bioms.magnetic_fields(W * (2.0 * np.random.rand(L) - 1.0))

            H = qy.convert(H, global_op_type)

            # Start with a single Z at center.
            initial_op = qy.Operator([1.],
                                     [qy.opstring('Z {}'.format(L // 2))])
            initial_op = qy.convert(initial_op, global_op_type)

            print('H = \n{}'.format(H))
            print('initial_op = \n{}'.format(initial_op))
            [op, com_norm, binarity,
             results_data] = bioms.find_binary_iom(H,
                                                   initial_op,
                                                   args,
                                                   _check_quantities=True)

            op *= 1.0 / op.norm()

            # Check the commutator norm and binarity of the result.
            com_norm_check = qy.commutator(H, op).norm()**2.0
            binarity_check = (0.5 * qy.anticommutator(op, op) - I).norm()**2.0

            assert (np.abs(com_norm - com_norm_check) < 1e-12)
            assert (np.abs(binarity - binarity_check) < 1e-12)
Example 10
def find_binary_iom(hamiltonian,
                    initial_op,
                    args=None,
                    _check_derivatives=False,
                    _check_quantities=False):
    """Find an approximate binary integral of motion O by iteratively
       minimizing the objective function
          \\lambda_1 |[H, O]|^2 + \\lambda_2 |O^2 - I|^2
       using Newton's method (scipy's Newton-CG, with conjugate gradient
       to invert the Hessian). The optimized O is Hermitian, traceless,
       and should approximately commute with H and approximately square
       to identity.

       Parameters
       ----------
       hamiltonian : qosy.Operator
           The Hamiltonian.
       initial_op : qosy.Operator
           The initial operator O. This defines the initial basis of OperatorStrings of O.
       args : dict, optional
           The arguments to the algorithm. Defaults to None.

       Returns
       -------
       [tau_op, com_norm, binarity, results_data]
           tau_op is a qosy.Operator representing the optimized binary integral of motion.
           com_norm and binarity are its commutator norm and binarity. results_data is a 
           dict containing additional information about the optimization.

       Note
       ----
       Internally, this function assumes that explored_basis, explored_extended_basis, 
       explored_com_data, and explored_anticom_data are never reset in between basis
       expansions. If they are, this will probably break something. A reset will at least
       cause the basis_inds stored in results_data to not properly correspond to the
       OperatorStrings in explored_basis and so will result in corrupted output files.

       It is OK to reset the "explored" variables *after* find_binary_iom() is called,
       but not while it is still executing. In this case, *all* of the variables should
       be reset together.
    """

    if args is None:
        args = dict()

    ### SETUP THE ALGORITHM PARAMETERS ###

    # Flag whether to print output for the run.
    verbose = arg(args, 'verbose', False)

    # The "explored" commutation and anticommutation relations
    # saved so far in the calculation. Used as a look-up table.
    if ('explored_basis' not in args) or (args['explored_basis'] is None):
        args['explored_basis'] = qy.Basis()
    if ('explored_extended_basis'
            not in args) or (args['explored_extended_basis'] is None):
        args['explored_extended_basis'] = qy.Basis()
    if ('explored_com_data'
            not in args) or (args['explored_com_data'] is None):
        args['explored_com_data'] = dict()
    if ('explored_anticom_data'
            not in args) or (args['explored_anticom_data'] is None):
        args['explored_anticom_data'] = dict()

    # The RAM threshold.
    percent_mem_threshold = arg(args, 'percent_mem_threshold', 85.0)

    # If using more than the RAM threshold, empty the explored data.
    #check_memory(args)
    if verbose:
        print_memory_usage()

    # The OperatorString type to use in all calculations.
    # Defaults to the type of the initial_op.
    global_op_type = arg(args, 'global_op_type', initial_op.op_type)

    # The \\lambda_1 coefficient in front of |[H, O]|^2
    coeff_com_norm = arg(args, 'coeff_com_norm', 1.0)
    # The \\lambda_2 coefficient in front of |O^2 - I|^2
    coeff_binarity = arg(args, 'coeff_binarity', 1.0)

    # The size of the truncated basis to represent [H, \tau]
    # when expanding by [H, [H, \tau]].
    truncation_size = arg(args, 'truncation_size', None)

    # The tolerance of the answer used as a convergence criterion for Newton's method.
    xtol = arg(args, 'xtol', 1e-6)

    # The number of expansions of the basis to
    # perform during the optimization.
    num_expansions = arg(args, 'num_expansions', 6)

    # The number of OperatorStrings to add to the
    # basis at each expansion step.
    dbasis = arg(args, 'dbasis', len(initial_op._basis))

    # The filename to save data to. If not provided, do not write to file.
    results_filename = arg(args, 'results_filename', None)

    # Identity operator string for reference.
    identity = qy.opstring('I', op_type=global_op_type)
    ### SETUP THE ALGORITHM PARAMETERS ###

    ### INITIALIZE THE HAMILTONIAN AND IOM ###
    # The Hamiltonian H that we are considering.
    H = qy.convert(copy.deepcopy(hamiltonian), global_op_type)

    # The binary integral of motion \\tau that we are finding.
    tau = qy.convert(copy.deepcopy(initial_op), global_op_type)
    tau.coeffs = tau.coeffs.real
    tau *= 1.0 / tau.norm()

    initial_tau = copy.deepcopy(tau)

    if verbose:
        print('Initial \\tau = ', flush=True)
        print_operator(initial_tau, num_terms=20)

    basis = tau._basis
    tau = tau.coeffs
    ### INITIALIZE THE HAMILTONIAN AND IOM ###

    ### DEFINE THE FUNCTIONS USED IN THE OPTIMIZATION ###
    # Variables to update at each step of the optimization.
    com_norm = None
    binarity = None
    obj_val = None
    com_residual = None
    anticom_residual = None
    inds_extended_basis_anticom = None
    ind_identity_anticom = None  # The index of the identity OperatorString in extended_basis_anticom.
    s_constants_anticom = None  # The current (anti-commuting) structure constants for the basis.
    iteration_data = None

    # Returns the iteration data
    # needed by obj, grad_obj, and hess_obj
    # and computes/updates it if it is not available.
    def updated_iteration_data(y):
        # Nonlocal variables that will be used or
        # modified in this function.
        nonlocal basis, iteration_data, inds_extended_basis_anticom, args, s_constants_anticom

        if iteration_data is not None and np.allclose(
                y, iteration_data[0], atol=1e-14):
            return iteration_data
        else:
            Lbar_tau = build_l_matrix(s_constants_anticom, y, basis,
                                      inds_extended_basis_anticom)
            iteration_data = [np.copy(y), Lbar_tau]

            return iteration_data

    def obj(y):
        # Nonlocal variables that will be used or
        # modified in this function.
        nonlocal basis, L_H, com_norm, binarity, obj_val, com_residual, anticom_residual, inds_extended_basis_anticom, ind_identity_anticom, identity, _check_quantities

        com_residual = L_H.dot(y)
        com_norm = nla.norm(com_residual)**2.0

        [_, Lbar_tau] = updated_iteration_data(y)

        anticom_residual = 0.5 * Lbar_tau.dot(y)
        anticom_residual[ind_identity_anticom] -= 1.0
        binarity = nla.norm(anticom_residual)**2.0

        # For debugging, check that commutator norm and binarity are correct:
        if _check_quantities:
            optest = qy.Operator(y, basis)
            id_op = qy.Operator([1.], [identity])
            com_norm_check = qy.commutator(H, optest).norm()**2.0
            optest_sqr = 0.5 * qy.anticommutator(optest, optest)
            binarity_check = (optest_sqr - id_op).norm()**2.0

            print('com_norm = {}, com_norm_check = {}'.format(
                com_norm, com_norm_check),
                  flush=True)
            print('binarity = {}, binarity_check = {}'.format(
                binarity, binarity_check),
                  flush=True)

            # Check that what the algorithm (bioms) thinks is O^2-I
            # agrees with what a brute-force calculation (qosy) thinks is O^2-I.

            # Check that they include the same operator strings.
            extended_basis_anticom = qy.Basis([
                args['explored_extended_basis'][ind_exp]
                for ind_exp in inds_extended_basis_anticom
            ])
            check_os = True
            for (_, os_) in optest_sqr:
                if os_ not in extended_basis_anticom:
                    print('extended_basis_anticom is missing {}'.format(os_),
                          flush=True)
                    check_os = False
            for os_ in extended_basis_anticom:
                if os_ not in optest_sqr._basis and os_ != identity:
                    coeff_ = anticom_residual[extended_basis_anticom.index(
                        os_)]
                    if np.abs(coeff_) > 1e-12:
                        print(
                            'extended_basis_anticom has an extra operator {}'.
                            format(os_),
                            flush=True)
                        check_os = False
            if not check_os:
                print('\nO = ', flush=True)
                print_operator(qy.Operator(y, basis), np.inf)

                print('\nWhat bioms thinks is O^2-I = ', flush=True)
                print_operator(
                    qy.Operator(anticom_residual, extended_basis_anticom),
                    np.inf)

                print('\nWhat qosy thinks is O^2-I =', flush=True)
                print_operator(optest_sqr - id_op, np.inf)
            assert (check_os)

            assert (np.abs(com_norm - com_norm_check) < 1e-12)
            assert (np.abs(binarity - binarity_check) < 1e-5)

        obj_val = coeff_com_norm * com_norm + coeff_binarity * binarity

        return obj_val

    # Specifies the gradient \partial_a Z of the objective function
    # Z = \\lambda_1 |[H, O]|^2 + \\lambda_2 |O^2 - I|^2,
    # where O=\sum_a g_a S_a.
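    # Derivation sketch: writing the coefficients of 0.5*{O, O} in the extended
    # basis as q_c(g) = 0.5 * (Lbar(g) g)_c, with (Lbar(g))_{ca} = \sum_b g_b \bar{f}_{ba}^c,
    # the symmetry of the anticommutator structure constants in (a, b) gives
    # dq_c/dg_a = (Lbar)_{ca}, so that
    #     \partial_a Z = 2 \lambda_1 (C_H g)_a
    #                    + \lambda_2 [ (Lbar^\dagger Lbar g)_a - 2 (Lbar)_{I,a} ],
    # which is what grad_obj computes below (Lbar_vec is the identity row of Lbar).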
    _checked_grad = False

    def grad_obj(y):
        # Nonlocal variables that will be used or
        # modified in this function.
        nonlocal basis, C_H, inds_extended_basis_anticom, ind_identity_anticom, identity, _check_derivatives, _checked_grad

        [_, Lbar_tau] = updated_iteration_data(y)

        Lbar_vec = Lbar_tau[ind_identity_anticom, :]
        Lbar_vec = Lbar_vec.real

        grad_obj = coeff_com_norm * (2.0 * C_H.dot(y)) \
                   + coeff_binarity * ((Lbar_tau.H).dot(Lbar_tau.dot(y)) - 2.0 * Lbar_vec)
        grad_obj = np.array(grad_obj).flatten().real

        # For debugging, check the derivatives against finite-difference derivatives.
        if _check_derivatives and not _checked_grad:
            fd_grad_obj = finite_diff_gradient(obj, y, eps=1e-5)

            print('fd_grad_obj = {}'.format(fd_grad_obj), flush=True)
            print('grad_obj    = {}'.format(grad_obj), flush=True)
            err_grad = nla.norm(fd_grad_obj - grad_obj)
            print('err_grad = {}'.format(err_grad), flush=True)

            assert (err_grad < 1e-8)

            _checked_grad = True

        return grad_obj

    _checked_hess = False

    def hess_obj(y):
        # Nonlocal variables that will be used or
        # modified in this function.
        nonlocal basis, C_H, args, inds_extended_basis_anticom, ind_identity_anticom, identity, _check_derivatives, _checked_hess, s_constants_anticom

        #explored_s_constants = args['explored_anticom_data']

        [_, Lbar_tau] = updated_iteration_data(y)
        Cbar_tau = (Lbar_tau.H).dot(Lbar_tau)

        # The part of the Hessian due to the commutator norm.
        hess_obj = coeff_com_norm * (2.0 * C_H)

        # The first part of the Hessian due to the binarity.
        hess_obj += coeff_binarity * (2.0 * Cbar_tau)

        # The index in the extended basis of the identity operator.
        #ind_identity_anticom

        # Vector representation of {\\tau, \\tau} in the extended basis.
        Lbar_vec = Lbar_tau.dot(y)

        # The final terms to add to the Hessian due to the binarity.
        terms = np.zeros((len(y), len(y)), dtype=complex)
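        # Derivation sketch: with q_c(g) = 0.5 * (Lbar(g) g)_c as in grad_obj,
        # d^2 q_c / (dg_a dg_b) = \bar{f}_{ab}^c, so the binarity contributes
        # 2 * Cbar_tau plus \sum_c (2 q_c - 2 \delta_{c,I}) \bar{f}_{ab}^c;
        # the loop below accumulates the latter sum into `terms`.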

        # The remaining parts of the Hessian due to the binarity.
        for (indB, indC, indA, coeffC) in zip(*s_constants_anticom):
            terms[indB, indA] += coeffC * np.conj(Lbar_vec[indC])

            if indC == ind_identity_anticom:
                terms[indB, indA] += -2.0 * np.conj(coeffC)

        # The final terms added to the Hessian.
        hess_obj += coeff_binarity * terms

        hess_obj = np.array(hess_obj).real

        # For debugging, check the derivatives against finite-difference derivatives.
        if _check_derivatives and not _checked_hess:
            fd_hess_obj = finite_diff_hessian(grad_obj, y, eps=1e-4)

            print('fd_hess_obj = {}'.format(fd_hess_obj), flush=True)
            print('hess_obj    = {}'.format(hess_obj), flush=True)
            err_hess = nla.norm(fd_hess_obj - hess_obj)
            print('err_hess = {}'.format(err_hess), flush=True)

            assert (err_hess < 1e-6)

            _checked_hess = True

        return hess_obj

    com_norms = []
    binarities = []
    taus = []
    tau_norms = []
    objs = []

    def update_vars(y):
        nonlocal com_norms, binarities, taus, tau_norms, objs, iteration_data, com_norm, obj_val, binarity, _checked_grad, _checked_hess
        # Ensure that the nonlocal variables are updated properly.

        # Make sure that the vector is normalized here.
        taus.append(y / nla.norm(y))
        tau_norms.append(nla.norm(y))
        objs.append(obj_val)

        obj_val_original = obj_val

        # Called to recompute com_norm and binarity with
        # a normalized tau operator.
        obj(y / nla.norm(y))

        com_norms.append(com_norm)
        binarities.append(binarity)

        # Reset the iteration data.
        del iteration_data
        iteration_data = None

        if verbose:
            print(' (obj = {}, |\\tau| = {}, com_norm = {}, binarity = {})'.
                  format(obj_val_original, nla.norm(y), com_norm, binarity),
                  flush=True)

        # Reset the _checked derivatives flags so that you can check the
        # derivatives against finite difference in the next step if
        # _check_derivatives=True.
        _checked_grad = False
        _checked_hess = False

    ### DEFINE THE FUNCTIONS USED IN THE OPTIMIZATION ###

    ### RUN THE OPTIMIZATION ###
    start_run = time.time()

    prev_num_taus = 0
    num_taus_in_expansion = []
    ind_expansion_from_ind_tau = dict()
    basis_sizes = []
    basis_inds = []
    for ind_expansion in range(num_expansions):
        if verbose:
            print('==== Iteration {}/{} ===='.format(ind_expansion + 1,
                                                     num_expansions),
                  flush=True)
            print('Basis size: {}'.format(len(basis)), flush=True)

        if verbose:
            print_memory_usage()

        ### Compute the relevant quantities in the current basis.

        # Use only the local part of the Hamiltonian if possible (using Pauli strings).
        if global_op_type == 'Pauli':
            # Consider only the terms in H that have spatial overlap
            # with the current basis, so that you do not waste
            # time and memory considering commutators between far away Pauli strings.
            H_local = find_local_H(basis, H)
        else:
            H_local = H

        # The commutant matrix C_H is computed once in each expansion.
        [s_constants_com, inds_extended_basis_com,
         _] = build_s_constants(basis,
                                H_local._basis,
                                args['explored_basis'],
                                args['explored_extended_basis'],
                                args['explored_com_data'],
                                operation_mode='commutator')
        L_H = build_l_matrix(s_constants_com, H_local, basis,
                             inds_extended_basis_com)
        C_H = (L_H.H).dot(L_H)
        C_H = C_H.real

        # The (anti-commuting) structure constants \bar{f}_{ba}^c are computed once in each expansion.
        # But the matrix (\bar{L}_\tau)_{ca} = \sum_b g_b \bar{f}_{ba}^c built from them
        # (and hence \bar{C}_\tau = \bar{L}_\tau^\dagger \bar{L}_\tau) is recomputed many times
        # from the structure constants as \tau changes during the minimization.
        [
            s_constants_anticom, inds_extended_basis_anticom,
            ind_identity_anticom
        ] = build_s_constants(basis,
                              basis,
                              args['explored_basis'],
                              args['explored_extended_basis'],
                              args['explored_anticom_data'],
                              operation_mode='anticommutator')

        basis_inds_in_explored_basis = np.array(
            [args['explored_basis'].index(os_b) for os_b in basis], dtype=int)
        basis_inds.append(basis_inds_in_explored_basis)

        basis_sizes.append(len(basis))

        # For debugging memory usage:
        """
        if verbose:
            vars_to_examine = [('args', args),
                               ('explored_anticom_data', args['explored_anticom_data']),
                               ('explored_com_data', args['explored_com_data']),
                               ('explored_extended_basis', args['explored_extended_basis']),
                               ('explored_basis', args['explored_basis']),
                               ('s_constants_anticom', s_constants_anticom),
                               ('s_constants_com', s_constants_com),
                               ('iteration_data', iteration_data),
                               ('L_H', L_H),
                               ('C_H', C_H),
                               ('anticom_residual', anticom_residual),
                               ('com_residual', com_residual),
                               ('inds_extended_basis_anticom', inds_extended_basis_anticom),
                               ('inds_extended_basis_com', inds_extended_basis_com)]
            for (var_name, var) in vars_to_examine:
                print('Size of {} is {} GB'.format(var_name, get_size(var)/1e9))

            #print('EXPLORED COM DATA:')
            #for key in args['explored_com_data']:
            #    print('{} : {}'.format(key, args['explored_com_data'][key]))
        """

        ### Minimize the objective function.
        options = {'maxiter': 1000, 'disp': verbose, 'xtol': xtol}
        x0 = tau.real / nla.norm(tau.real)
        opt_result = so.minimize(obj,
                                 x0=x0,
                                 method='Newton-CG',
                                 jac=grad_obj,
                                 hess=hess_obj,
                                 options=options,
                                 callback=update_vars)

        # Clear the iteration data.
        del iteration_data
        iteration_data = None

        ### On all but the last iteration, expand the basis:
        if ind_expansion < num_expansions - 1:
            old_basis = copy.deepcopy(basis)

            # Expand by [H, [H, \tau]]
            expand_com(H,
                       com_residual,
                       inds_extended_basis_com,
                       basis,
                       dbasis // 2,
                       args['explored_basis'],
                       args['explored_extended_basis'],
                       args['explored_com_data'],
                       truncation_size=truncation_size,
                       verbose=verbose)

            # Expand by \{\tau, \tau\}
            if verbose:
                print('|Basis of \\tau^2|             = {}'.format(
                    len(inds_extended_basis_anticom)),
                      flush=True)

            expand_anticom(anticom_residual, inds_extended_basis_anticom,
                           basis, dbasis // 2, args['explored_extended_basis'])

            # Project onto the new basis.
            tau = project(basis, qy.Operator(taus[-1], old_basis))
            tau = tau.coeffs.real / nla.norm(tau.coeffs.real)

        if verbose:
            print('|Explored basis|              = {}'.format(
                len(args['explored_basis'])),
                  flush=True)
            print('|Explored extended basis|     = {}'.format(
                len(args['explored_extended_basis'])),
                  flush=True)

        if verbose and ind_expansion != num_expansions - 1:
            print('\\tau = ', flush=True)
            print_operator(qy.Operator(tau, basis))

        ### Do some book-keeping.
        # Keep track of how many \tau's were evaluated in the current expansion.
        num_taus_in_expansion.append(len(taus) - prev_num_taus)
        for ind_tau in range(prev_num_taus, len(taus)):
            ind_expansion_from_ind_tau[ind_tau] = ind_expansion
        prev_num_taus = len(taus)

    if verbose:
        print('Computing fidelities.', flush=True)
    start = time.time()

    initial_tau_vector = initial_tau.coeffs
    initial_tau_inds = np.array(
        [args['explored_basis'].index(os_tv) for os_tv in initial_tau._basis],
        dtype=int)

    final_tau_vector = taus[-1]
    final_tau_inds = basis_inds[-1]

    fidelities = []
    initial_fidelities = []
    final_fidelities = []
    proj_final_fidelities = []
    for ind_tau in range(len(taus)):
        ind_expansion = ind_expansion_from_ind_tau[ind_tau]

        tau_vector = taus[ind_tau]
        tau_inds = basis_inds[ind_expansion]

        # For debugging, check that the quantities stored using basis_inds
        # agree with their recorded values during the optimization.
        if _check_quantities:
            basis_op = qy.Basis(
                [args['explored_basis'][ind_eb] for ind_eb in tau_inds])
            tau_op = qy.Operator(tau_vector, basis_op)

            com_norm_recorded = com_norms[ind_tau]
            binarity_recorded = binarities[ind_tau]

            com_norm_check = qy.commutator(H, tau_op).norm()**2.0
            id_op = qy.Operator([1.], [identity])
            binarity_check = (0.5 * qy.anticommutator(tau_op, tau_op) -
                              id_op).norm()**2.0

            print(
                'For stored tau_{}, com_norm = {}, com_norm_check = {}'.format(
                    ind_tau, com_norm_recorded, com_norm_check),
                flush=True)
            print(
                'For stored tau_{}, binarity = {}, binarity_check = {}'.format(
                    ind_tau, binarity_recorded, binarity_check),
                flush=True)

            assert (np.abs(com_norm_recorded - com_norm_check) < 1e-12)
            assert (np.abs(binarity_recorded - binarity_check) < 1e-9)

        if ind_tau > 0:
            ind_expansion_prev = ind_expansion_from_ind_tau[ind_tau - 1]

            prev_tau_vector = taus[ind_tau - 1]
            prev_tau_inds = basis_inds[ind_expansion_prev]
            overlap = compute_overlap_inds(prev_tau_vector, prev_tau_inds,
                                           tau_vector, tau_inds)
            fidelity = np.abs(overlap)**2.0

            fidelities.append(fidelity)

            # Project the final \tau into the current tau's basis.
            proj_final_tau_vector = project_vector(final_tau_vector,
                                                   final_tau_inds, tau_inds)
            # Normalize the projected vector.
            proj_final_tau_vector /= nla.norm(proj_final_tau_vector)
            # And compute its fidelity with the current tau.
            overlap_proj_final = np.dot(np.conj(tau_vector),
                                        proj_final_tau_vector)
            fidelity_proj_final = np.abs(overlap_proj_final)**2.0

            proj_final_fidelities.append(fidelity_proj_final)

        overlap_initial = compute_overlap_inds(initial_tau_vector,
                                               initial_tau_inds, tau_vector,
                                               tau_inds)
        fidelity_initial = np.abs(overlap_initial)**2.0
        initial_fidelities.append(fidelity_initial)

        overlap_final = compute_overlap_inds(final_tau_vector, final_tau_inds,
                                             tau_vector, tau_inds)
        fidelity_final = np.abs(overlap_final)**2.0
        final_fidelities.append(fidelity_final)

    end = time.time()
    if verbose:
        print('Computed fidelities in {} seconds.'.format(end - start),
              flush=True)

    end_run = time.time()
    if verbose:
        print('Total time elapsed: {} seconds (or {} minutes or {} hours)'.
              format(end_run - start_run, (end_run - start_run) / 60.0,
                     (end_run - start_run) / 3600.0),
              flush=True)

    # Store the results in a dictionary.
    results_data = {
        'taus': taus,
        'tau_norms': tau_norms,
        'basis_inds': basis_inds,
        'basis_sizes': basis_sizes,
        'num_taus_in_expansion': num_taus_in_expansion,
        'ind_expansion_from_ind_tau': ind_expansion_from_ind_tau,
        'com_norms': com_norms,
        'binarities': binarities,
        'objs': objs,
        'fidelities': fidelities,
        'initial_fidelities': initial_fidelities,
        'final_fidelities': final_fidelities,
        'proj_final_fidelities': proj_final_fidelities
    }

    # Save the results to a file if provided.
    if results_filename is not None:
        # Record the input arguments in addition to the
        # results, but do not store the saved commutation
        # and anticommutation data, just the explored basis.
        args_to_record = dict()
        for key in args:
            if key not in [
                    'explored_com_data', 'explored_anticom_data',
                    'explored_extended_basis'
            ]:
                args_to_record[key] = args[key]

        data = [args_to_record, results_data]
        results_file = open(results_filename, 'wb')
        pickle.dump(data, results_file)
        results_file.close()

        args_to_record.clear()
        del args_to_record

    # The final optimized operator O and its commutator norm and binarity.
    tau_op = qy.Operator(taus[-1], basis)
    com_norm = com_norms[-1]
    binarity = binarities[-1]

    # For debugging, check that the final answer's quantities agree with
    # expectations.
    if _check_quantities:
        com_norm_check = qy.commutator(H, tau_op).norm()**2.0
        id_op = qy.Operator([1.], [identity])
        binarity_check = (0.5 * qy.anticommutator(tau_op, tau_op) -
                          id_op).norm()**2.0

        print('final com_norm = {}, com_norm_check = {}'.format(
            com_norm, com_norm_check),
              flush=True)
        print('final binarity = {}, binarity_check = {}'.format(
            binarity, binarity_check),
              flush=True)

        assert (np.abs(com_norm - com_norm_check) < 1e-12)
        assert (np.abs(binarity - binarity_check) < 1e-12)

    if verbose:
        print('Final tau = ', flush=True)
        print_operator(tau_op, num_terms=200)

        print('Final com norm  = {}'.format(com_norm), flush=True)
        print('Final binarity  = {}'.format(binarity), flush=True)

    return [tau_op, com_norm, binarity, results_data]
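
# A minimal usage sketch for find_binary_iom (not from the library; it mirrors the
# tests above and assumes the import aliases numpy as np, qosy as qy, and bioms).
# The "explored_*" entries are optional caches that can be shared across calls,
# as long as they are not reset while a call is executing.
import numpy as np
import qosy as qy
import bioms

L = 5
H = bioms.xxz_chain(L, 1.0, 1.0)
H += bioms.magnetic_fields(2.0 * np.random.rand(L) - 1.0)

# Start from a single Z at the center site.
initial_op = qy.Operator([1.], [qy.opstring('Z {}'.format(L // 2))])

args = {
    'dbasis': 10,
    'xtol': 1e-6,
    'num_expansions': 4,
    'verbose': False,
    'global_op_type': 'Pauli',
    'explored_basis': qy.Basis(),
    'explored_extended_basis': qy.Basis(),
    'explored_com_data': dict(),
    'explored_anticom_data': dict()
}

[op, com_norm, binarity, results_data] = bioms.find_binary_iom(H, initial_op, args)
print('com_norm = {}, binarity = {}'.format(com_norm, binarity))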
Example 12
def bose_hubbard_square(L, periodic=False):
    """Construct a 2D square-lattice hard-core Bose-Hubbard Hamiltonian (without magnetic fields), 
    which when written in terms of spin operators is an XX-model of the form:
        H = -1/2 \\sum_<ij> (X_i X_j + Y_i Y_j)
    
    Parameters
    ----------
    L : int
        The side-length of the square.
    periodic : bool, optional
        Specifies whether the model is periodic. Defaults to False.
    
    Returns
    -------
    qosy.Operator
        The Hamiltonian.
    
    Examples
    --------
    Build a 5x5 2D hard-core Bose-Hubbard model:
        >>> H = bose_hubbard_square(5)
    """

    Lx = L
    Ly = L
    N = Lx * Ly

    coeffs = []
    op_strings = []

    for y in range(Ly):
        for x in range(Lx):
            # Two bonds
            for bond in [(1, 0), (0, 1)]:
                site = y * Lx + x

                dx = bond[0]
                dy = bond[1]

                # Bond pointing to the right and up
                xp = x + dx
                yp = y + dy
                if periodic:
                    xp = xp % Lx
                    yp = yp % Ly

                if xp >= 0 and xp < Lx and yp >= 0 and yp < Ly:
                    sitep = yp * Lx + xp

                    s1 = np.minimum(site, sitep)
                    s2 = np.maximum(site, sitep)

                    for orb in ['X', 'Y']:
                        coeffs.append(-0.5)  # J = 1.0
                        op_strings.append(
                            qy.opstring('{} {} {} {}'.format(orb, s1, orb,
                                                             s2)))

    H = qy.Operator(coeffs, op_strings)

    return H
def create_zero_modes(zm_type):
    # Create a double_ring or double_gaussian zero mode.
    #
    # Return the \alpha_i, \beta_j parameters defining
    # the zero mode as well as the chemical potential
    # and pairing parameters defining the Hamiltonian
    # that commutes with these zero modes.

    alphas = np.zeros((L1, L2))
    betas = np.zeros((L1, L2))
    mus = np.zeros((L1, L2))

    for ind1 in range(L1):
        for ind2 in range(L2):
            x = ind1 * a1 + ind2 * a2

            # Double ring Gaussian MZMs
            if zm_type == 'double_ring':
                alphas[ind1, ind2] = np.exp(
                    -((nla.norm(x - center) - R1) /
                      (np.sqrt(2) * sigma1))**2.0)  #+ noise*np.random.rand()
                betas[ind1, ind2] = np.exp(
                    -((nla.norm(x - center) - R2) /
                      (np.sqrt(2) * sigma2))**2.0)  #+ noise*np.random.rand()

            # Split double-Gaussian MZMs
            elif zm_type == 'double_gaussian':
                alphas[ind1,
                       ind2] = np.exp(-(nla.norm(x - xA) /
                                        (np.sqrt(2) * sigma))**2.0) + np.exp(
                                            -(nla.norm(x - xC) /
                                              (np.sqrt(2) * sigma))**2.0)
                betas[ind1,
                      ind2] = np.exp(-(nla.norm(x - xB) /
                                       (np.sqrt(2) * sigma))**2.0) + np.exp(
                                           -(nla.norm(x - xD) /
                                             (np.sqrt(2) * sigma))**2.0)
            else:
                raise ValueError('Invalid zero mode type: {}'.format(zm_type))

    hamiltonian = qy.Operator([], [], 'Fermion')
    for ind1 in range(L1):
        for ind2 in range(L2):
            orb = ind2 * L1 + ind1
            i = orb
            ai = alphas[ind1, ind2]
            bi = betas[ind1, ind2]

            if ind1 < L1 - 1:
                orb1 = ind2 * L1 + (ind1 + 1)
                j = orb1
                aj = alphas[ind1 + 1, ind2]
                bj = betas[ind1 + 1, ind2]
                hamiltonian += two_site_ham1(ai, bi, aj, bj, i, j)

            if ind2 < L2 - 1:
                orb2 = (ind2 + 1) * L1 + ind1
                j = orb2
                aj = alphas[ind1, ind2 + 1]
                bj = betas[ind1, ind2 + 1]
                hamiltonian += two_site_ham1(ai, bi, aj, bj, i, j)

    mus = np.zeros((L1, L2))
    D1 = np.zeros((L1, L2))
    D2 = np.zeros((L1, L2))
    for (coeff, os) in hamiltonian:
        if len(os.orbital_operators) == 2:
            if os.orbital_operators[0] == 'CDag' and os.orbital_operators[
                    1] == 'CDag':
                i = os.orbital_labels[0]
                j = os.orbital_labels[1]

                x1 = i % L1
                y1 = i // L1

                x2 = j % L1
                y2 = j // L1

                #print('(x1,y1)=({},{}), (x2,y2)=({},{})'.format(x1,y1,x2,y2))

                if x2 == x1 + 1 and y2 == y1:
                    D1[x1, y1] += np.real(coeff)
                elif x2 == x1 and y2 == y1 + 1:
                    D2[x1, y1] += np.real(coeff)
                else:
                    raise ValueError(
                        'Invalid term in Hamiltonian: {}'.format(os))
            elif os.orbital_operators[0] == 'CDag' and os.orbital_operators[
                    1] == 'C':
                i = os.orbital_labels[0]
                j = os.orbital_labels[1]
                x1 = i % L1
                y1 = i // L1

                if i == j:
                    mus[x1, y1] += np.real(coeff)
            else:
                raise ValueError('Invalid term in Hamiltonian: {}'.format(os))

    Dabs = np.sqrt(np.abs(D1)**2.0 + np.abs(D2)**2.0)

    Dangle = np.arctan2(D2, D1) / np.pi

    return (alphas, betas, mus, D1, D2, Dabs, Dangle)
Example 14
def test_s_constants_l_matrix2():
    # Tests that the build_s_constants and build_l_matrix functions work when
    # keeping the basis fixed but allowing the
    # operator to change.

    for mode in ['commutator', 'anticommutator']:
        explored_basis = qy.Basis()
        explored_extended_basis = qy.Basis()
        explored_s_constants_data = dict()

        # Explore twice with two different operators but the same basis.
        basis = qy.Basis([qy.opstring(s_os) for s_os in ['Z 0', 'Z 1']])

        # Operator 1
        op1 = qy.Operator([0.1, 0.2, 0.3, 0.4], [
            qy.opstring(s_os)
            for s_os in ['Z 0', 'X 0 X 1', 'Z 2 Z 3', 'Y 0 Y 2']
        ])

        [s_constants1, inds_ext_basis1,
         _] = build_s_constants(basis,
                                op1._basis,
                                explored_basis,
                                explored_extended_basis,
                                explored_s_constants_data,
                                operation_mode=mode)
        l_matrix1 = build_l_matrix(s_constants1, op1, basis, inds_ext_basis1)

        # Operator 2 has a larger basis than operator 1
        op2 = qy.Operator([0.1, 0.2, 0.3, 0.4, -0.5, -0.6], [
            qy.opstring(s_os) for s_os in
            ['Z 0', 'X 0 X 1', 'Z 2 Z 3', 'Y 0 Y 2', 'Z 2', 'X 2 X 3']
        ])

        [s_constants2, inds_ext_basis2,
         _] = build_s_constants(basis,
                                op2._basis,
                                explored_basis,
                                explored_extended_basis,
                                explored_s_constants_data,
                                operation_mode=mode)
        l_matrix2 = build_l_matrix(s_constants2, op2, basis, inds_ext_basis2)

        ext_basis2 = qy.Basis(
            [explored_extended_basis[ind_exp] for ind_exp in inds_ext_basis2])

        # Explore once with the second operator and the same basis.
        explored_basis = qy.Basis()
        explored_extended_basis = qy.Basis()
        explored_s_constants_data = dict()

        [s_constants3, inds_ext_basis3,
         _] = build_s_constants(basis,
                                op2._basis,
                                explored_basis,
                                explored_extended_basis,
                                explored_s_constants_data,
                                operation_mode=mode)
        l_matrix3 = build_l_matrix(s_constants3, op2, basis, inds_ext_basis3)

        ext_basis3 = qy.Basis(
            [explored_extended_basis[ind_exp] for ind_exp in inds_ext_basis3])

        print('basis      = \n{}'.format(basis))
        print('ext_basis2 = \n{}'.format(ext_basis2))
        print('ext_basis3 = \n{}'.format(ext_basis3))

        # Compare results.
        assert (ext_basis2 == ext_basis3)

        diff_lmatrices = ssla.norm(l_matrix2 - l_matrix3)
        assert (diff_lmatrices < 1e-12)
Example 15
            # Skip the data collection if the operator is not the converged
            # one and we only care about recording the converged data.
            if record_only_converged and not is_final_expanded_tau:
                continue
            
            # The dictionary to save this particular
            # operator's info to.
            tau_dict = dict()
            
            # The expansion index.
            ind_expansion = ind_expansion_from_ind_tau[ind_tau]
            
            coeffs              = taus[ind_tau]
            inds_explored_basis = results_data['basis_inds'][ind_expansion]
            op_strings          = [explored_basis[ind_os] for ind_os in inds_explored_basis]
            operator            = qy.Operator(coeffs, op_strings)

            # Compute the overlap with the previously considered tau operator
            # (which will be the converged tau operator from the previous
            #  expansion if record_only_converged is set to True).
            tau_vector = coeffs
            tau_inds   = inds_explored_basis
            if prev_tau_vector is not None:
                expansion_overlap  = compute_overlap_inds(prev_tau_vector, prev_tau_inds,
                                                          tau_vector, tau_inds)
                expansion_fidelity = np.abs(expansion_overlap)**2.0
            else:
                expansion_fidelity = np.nan
            prev_tau_vector = np.copy(tau_vector)
            prev_tau_inds   = np.copy(tau_inds)
            
Example 16
def xxz_square(L, J_xy, J_z, periodic=False):
    """Construct a 2D square-lattice XXZ Hamiltonian H = 1/4 \\sum_<ij> [J_xy (X_i X_j + Y_i Y_j) + J_z Z_i Z_j]
    
    Parameters
    ----------
    L : int
        The side-length of the square.
    J_xy : float
        The coefficient in front of the exchange term.
    J_z : float
        The coefficient in front of the Ising term.
    periodic : bool, optional
        Specifies whether the model is periodic. Defaults to False.
    
    Returns
    -------
    qosy.Operator
        The Hamiltonian.
    
    Examples
    --------
    Build a 5x5 2D Heisenberg model:
        >>> H = xxz_square(5, 1.0, 1.0)
    """

    Lx = L
    Ly = L
    N = Lx * Ly

    coeffs = []
    op_strings = []

    for y in range(Ly):
        for x in range(Lx):
            # Two bonds
            for bond in [(1, 0), (0, 1)]:
                site = y * Lx + x

                dx = bond[0]
                dy = bond[1]

                # Bond pointing to the right and up
                xp = x + dx
                yp = y + dy
                if periodic:
                    xp = xp % Lx
                    yp = yp % Ly

                if xp >= 0 and xp < Lx and yp >= 0 and yp < Ly:
                    sitep = yp * Lx + xp

                    s1 = np.minimum(site, sitep)
                    s2 = np.maximum(site, sitep)

                    for orb in ['X', 'Y', 'Z']:
                        if orb in ['X', 'Y']:
                            coeffs.append(0.25 * J_xy)
                        else:
                            coeffs.append(0.25 * J_z)
                        op_strings.append(
                            qy.opstring('{} {} {} {}'.format(orb, s1, orb,
                                                             s2)))

    H = qy.Operator(coeffs, op_strings)

    return H
Example 17
def single_site_parity(site, num_orbitals, mode=None):
    """Create a single Z_i operator at the given site
    (or I-2N_i in terms of fermionic operators).

    Parameters
    ----------
    site : int
         The site i.
    num_orbitals : int
         The number of orbitals N to consider.
    mode : str, optional
         The basis B of OperatorStrings to use to
         represent the operator Z_i in. "constant"
         uses B={Z_i}. "linear" uses B={Z_j}_{j=1}^N.
         "quadratic" uses B={A_i B_j}_{i<=j=1}^N
         where A_i, B_j are Majorana fermions. 
         Defaults to "constant".

    Returns
    -------
    qosy.Operator
        The Z_i operator.

    Note
    ----
    This returns an Operator that is a sum of Majorana strings
    to accommodate different initializations. This can be converted
    to a sum of Pauli strings using qosy.convert().
    """

    if mode is None:
        mode = 'constant'

    # Initial operator is a D at the given site.
    coeffs = [1.0]
    op_strings = [qy.opstring('D {}'.format(site))]

    if mode != 'constant':
        for site1 in range(num_orbitals):
            if mode == 'quadratic':
                for site2 in range(site1, num_orbitals):
                    if site1 != site2:
                        coeffs.append(0.0)
                        op_strings.append(
                            qy.opstring('1j A {} B {}'.format(site1, site2)))
                        coeffs.append(0.0)
                        op_strings.append(
                            qy.opstring('1j B {} A {}'.format(site1, site2)))
                    else:
                        if site1 != site:
                            coeffs.append(0.0)
                            op_strings.append(qy.opstring(
                                'D {}'.format(site1)))
            elif mode == 'linear':
                if site1 != site:
                    coeffs.append(0.0)
                    op_strings.append(qy.opstring('D {}'.format(site1)))
            else:
                raise ValueError('Invalid mode: {}'.format(mode))

    initial_op = qy.Operator(coeffs, op_strings)

    return initial_op
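
# A short usage sketch for single_site_parity (an illustration only, assuming qosy as qy).
# The 'linear' and 'quadratic' modes pad the same Z_i (i.e. D_i) operator with
# zero-coefficient strings; only the initial basis of the operator changes.
import qosy as qy

op_const = single_site_parity(2, 5)                    # basis: {D_2}
op_lin   = single_site_parity(2, 5, mode='linear')     # basis: {D_j} for all sites j
op_quad  = single_site_parity(2, 5, mode='quadratic')  # basis: quadratic Majorana strings as well
op_pauli = qy.convert(op_quad, 'Pauli')                # rewrite as a sum of Pauli strings
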
L = 100
xA = 0.0
xB = L-1.0
sigma = 10.0

x  = np.arange(L)
alphas = np.exp(-((x-xA)/(np.sqrt(2)*sigma))**2.0)
betas  = np.exp(-((x-xB)/(np.sqrt(2)*sigma))**2.0)

t  = 1.0
D  = t*np.tanh((xB-xA)/(2.0*(sigma**2.0)))
mu = np.zeros(L)

# Create the zero mode Hamiltonian from bond operators.
print('D = {}'.format(D))
hamiltonian = qy.Operator([], [], 'Fermion')
for j in range(L-1):
    coeffs     = []
    op_strings = []

    coeffs.append(t)
    op_strings.append(qy.opstring('CDag {} C {}'.format(j, j+1)))
    
    coeffs.append(D)
    op_strings.append(qy.opstring('CDag {} CDag {}'.format(j,j+1)))

    muj   = -2.0*t*np.exp(-(2.0*(x[j]-xA) + 1.0)/(2.0*(sigma**2.0))) / (1.0 + np.exp(-(xB-xA)/(sigma**2.0)))
    mujp1 = -2.0*t*np.exp((2.0*(x[j]-xB) + 1.0)/(2.0*(sigma**2.0))) / (1.0 + np.exp(-(xB-xA)/(sigma**2.0)))
 
    coeffs.append(muj)
    op_strings.append(qy.opstring('CDag {} C {}'.format(j,j)))