Example #1
def svd(basis, A, B, v, Compute_UV=True):
    """Compute the singular value decomposition of vector v
        ---A, B are the lists of sites in each bipartition"""

    # Setup bases for A & B
    A_lattice = tfim.Lattice([len(A)])
    B_lattice = tfim.Lattice([len(B)])

    A_basis = tfim.IsingBasis(A_lattice)
    B_basis = tfim.IsingBasis(B_lattice)

    # Build psi matrix
    psiMat = np.zeros([A_basis.M, B_basis.M])
    for index in range(basis.M):
        state = basis.state(index)
        a_state = state[A]
        b_state = state[B]
        a_index = A_basis.index(a_state)
        b_index = B_basis.index(b_state)
        psiMat[a_index, b_index] = v[index]

    # Perform SVD
    if Compute_UV:
        U, S, V = linalg.svd(psiMat, compute_uv=True)
        return S, U, V
    else:
        S = linalg.svd(psiMat, compute_uv=False)
        return S
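
A minimal usage sketch for the function above (not part of the original example): it assumes the project's tfim module is importable and that svd()'s module provides numpy as np and a linalg with an svd routine. The entanglement entropy is computed inline from the Schmidt coefficients.

import numpy as np
from numpy import linalg  # provides linalg.svd as used inside svd() above
import tfim               # project module (assumed importable)

# Small 4-site chain and a normalized random test state
lattice = tfim.Lattice([4], True)
basis = tfim.IsingBasis(lattice)
rng = np.random.default_rng(0)
v = rng.random(basis.M)
v /= np.linalg.norm(v)

# Bipartition into sites {0, 1} and {2, 3}; keep only the singular values
S = svd(basis, [0, 1], [2, 3], v, Compute_UV=False)

# Von Neumann entanglement entropy from the Schmidt coefficients
p = S[S > 1e-12] ** 2
print(-np.sum(p * np.log(p)))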
Example #2
def main():
    
    # Parse command line arguments
    ###################################
    parser = argparse.ArgumentParser(description=("Search for second order perturbation theory approximable Jij matrices") )
    parser.add_argument('lattice_specifier', 
                            help=(  "Either: L (linear dimensions of the system)"
                                    " or the filename base of matrix files") )
    parser.add_argument('seed_limit', help = ("Specifying the range of seeds to search through"))
    
    parser.add_argument('-o', default='output', help='output filename base')
    
    # Note: argparse's type=bool treats any non-empty string as True, so this flag
    # cannot actually be switched off from the command line.
    parser.add_argument('-PBC', type=bool, default=True, help="Specifying PBC")
    
    parser.add_argument('-J', type=float, default = 1.0, help = 'Nearest neighbor Ising coupling')
    
    parser.add_argument('-max_order', type=int, default = 4, help = 'The maximum Hamming distance between degenerate ground states')
    
    args = parser.parse_args()
    ###################################
    
    L = [ int(args.lattice_specifier) ]
    PBC = args.PBC
    J = args.J
    seed_limit = int(args.seed_limit)
    max_order = int(args.max_order)
    
    # Build lattice and basis
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)
    
    # Specifying parameters needed
    seed_range = range(seed_limit)
    
    # Begin search
    Jij_array = []

    for i in seed_range:
        Jij = tfim.Jij_instance(N,J,"bimodal",i) 
        Jij_array.append(Jij)
        
    # Calculate energy array:
    indices_array = []

    for Jij in Jij_array:
        Energies = -tfim.JZZ_SK_ME(basis,Jij)
        GS_energy = np.min(Energies)
        GS_indices = np.nonzero(Energies == GS_energy)[0]
        indices_array.append(GS_indices)
    
    # Keep seeds whose degenerate ground states pass the Hamming-distance criterion (up to max_order)
    seed_list = []
    
    for index, indices in enumerate(indices_array):
        if tfim_perturbation.judge(max_order, tfim_perturbation.Hamming_array(indices, basis), N):
            seed_list.append(index)

    print(seed_list)
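
For testing a single seed outside of main(), here is a hedged per-seed sketch built from the same tfim / tfim_perturbation calls used above:

import numpy as np
import tfim               # project module
import tfim_perturbation  # project module

lattice = tfim.Lattice([4], True)
basis = tfim.IsingBasis(lattice)
Jij = tfim.Jij_instance(lattice.N, 1.0, "bimodal", 0)  # seed 0

Energies = -tfim.JZZ_SK_ME(basis, Jij)
GS_indices = np.nonzero(Energies == np.min(Energies))[0]

# True if the degenerate ground states pass the Hamming-distance criterion
print(tfim_perturbation.judge(4, tfim_perturbation.Hamming_array(GS_indices, basis),
                              lattice.N))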
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-N', type=int, default=16, help='Number of spins')
    parser.add_argument('-C',
                        type=int,
                        default=3,
                        help='J Matrix configuration')
    args = parser.parse_args()

    num_of_spins = args.N
    configuration = args.C
    J_Matrix = np.loadtxt("J_Matrices/J" + str(num_of_spins) + "/J_Matrix" +
                          str(configuration) + "/" + str(num_of_spins) +
                          "J_Matrix" + str(configuration) + ".dat")
    lattice = tfim.Lattice([num_of_spins], True)
    basis = tfim.IsingBasis(lattice)
    infdim_Energy_Array = tfim.JZZ_SK_ME(basis, J_Matrix)

    np.savetxt(
        "J_Matrices/J" + str(num_of_spins) + "/J_Matrix" + str(configuration) +
        "/EnergyArray.dat", infdim_Energy_Array)
Example #4
L = [3]
Jij_seed = 21
h_x_range = np.arange(0, 0.001, 0.00002)

# In[4]:

PBC = True
J = 1

# In[5]:

# Build lattice and basis
###################################
lattice = tfim.Lattice(L, PBC)
N = lattice.N
basis = tfim.IsingBasis(lattice)
###################################

# construct random J matrix
Jij = tfim.Jij_instance(N, J, "bimodal", Jij_seed)

# In[6]:

# List out all the spin_states, corresponding indices and energies
Energies = -tfim.JZZ_SK_ME(basis, Jij)
for index in range(2**N):
    print(index, basis.state(index), Energies[index])

# In[7]:

# Build 2nd order approximated matrix
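
The notebook cell ends before the matrix is actually built. A hedged continuation, reusing the tfim_perturbation calls that appear in Example #10 below (the module is assumed to be imported alongside tfim), might look like:

import tfim_perturbation  # project module, used as in Example #10

GS_energy, GS_indices = tfim_perturbation.GS(Energies)
H_0 = tfim_perturbation.H_0(GS_energy, GS_indices)
H_app_1 = tfim_perturbation.H_app_1(basis, GS_indices, N)
H_app_2 = tfim_perturbation.H_app_2(basis, Jij, GS_indices, N, GS_energy)

# Diagonalize the approximate Hamiltonian at each transverse field value
for h_x in h_x_range:
    H_app = tfim_perturbation.H_app(h_x, H_0, H_app_1, H_app_2, J)
    app_eigenvalues, app_eigenstates = np.linalg.eigh(H_app)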
Example #5
def lanczos(L, seed, h_x_range, PBC, h_z, maxiter):

    # In[3]:

    start_time = time.time()

    def Jij_2D_NN(seed, N, PBC, xwidth, yheight, lattice, p):
        def bond_list_unequal(seed, N, PBC, xwidth, yheight, p):
            # p is the probability distribution of ferromagnetic bonds
            np.random.seed(seed)
            if PBC == True:
                num_of_bonds = 2 * N
            else:
                num_of_bonds = (xwidth - 1) * (yheight) + (xwidth) * (yheight -
                                                                      1)
            i = [np.random.random() for _ in range(num_of_bonds)]
            # print(i)
            a = np.zeros(len(i))
            for index, prob_seed in enumerate(i):
                if prob_seed <= p:
                    a[index] += 1
                else:
                    a[index] -= 1
            return a

        def make_Jij(N, b_list, lattice):
            #Goes through the list of bonds to make the jij matrix that tells you how all of the spins are bonded to each other

            bond_index = 0
            Jij = np.zeros((N, N))
            for i in range(0, N):
                NNs = lattice.NN(i)
                for j in NNs:
                    if Jij[i][j] == 0:
                        Jij[i][j] = b_list[bond_index]
                        Jij[j][i] = b_list[bond_index]
                        bond_index += 1
            return Jij

        b_list = bond_list_unequal(seed, N, PBC, xwidth, yheight, p)
        return make_Jij(N, b_list, lattice)

    # In[4]:

    # Build lattice and basis
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)

    # In[5]:

    #construct random J matrix
    Jij = Jij_2D_NN(seed, N, PBC, L[0], L[1], lattice, p=0.5)

    # In[6]:

    # List out all the spin_states, corresponding indices and energies
    Ising_energy_arr = np.zeros(2**N)
    for index in range(2**N):
        state = basis.state(index)
        # convert the state from 0/1 representation to -1/+1 spins
        for i in range(N):
            if state[i] == 0:
                state[i] -= 1
        Ising_energy = 0
        for i in range(N):
            for j in range(i + 1, N, 1):
                bond_energy = Jij[i, j] * state[i] * state[j]
                Ising_energy += bond_energy
        Ising_energy_arr[index] = Ising_energy
        print(index, basis.state(index), Ising_energy)
    print("----%s seconds ----" % (time.time() - start_time))

    # In[7]:

    GS_energy, GS_indices = tfim_perturbation.GS(Ising_energy_arr)

    # initialize Lanczos vector
    v0 = np.zeros(2**N)
    for i in GS_indices:
        v0[i] = 1

    # In[8]:

    # modified exact Hamiltonians using compressed sparse row matrices
    def V_exact_csr(basis, lattice):
        row = []
        col = []
        for ket in range(basis.M):
            state = basis.state(ket)
            for i in range(lattice.N):
                basis.flip(state, i)
                bra = basis.index(state)
                row.append(bra)
                col.append(ket)
                basis.flip(state, i)
        data = np.ones(len(col))
        V_exact = sparse.csr_matrix((data, (np.array(row), np.array(col))),
                                    shape=(2**N, 2**N))
        return V_exact

    def H_0_exact_csr(Energies):
        return sparse.diags(Energies)

    # In[9]:

    # modified function to eigendecompose the exact Hamiltonian using Lanczos method
    def exc_eigensystem(basis, h_x_range, lattice, Energies):
        # Calculate exact eigenvalues and eigenstates for range(h_x)
        exc_eigenvalues = np.zeros(len(h_x_range))
        first_excited_exc_energies = np.zeros(len(h_x_range))
        exc_eigenstates = np.zeros((len(h_x_range), basis.M))
        V_exc_csr = V_exact_csr(basis, lattice)
        H_0_exc_csr = H_0_exact_csr(Energies)
        for j, h_x in enumerate(h_x_range):
            H = H_0_exc_csr - V_exc_csr.multiply(h_x)
            exc_eigenvalue, exc_eigenstate = spla.eigsh(
                H,
                k=2,
                which='SA',
                v0=v0,
                maxiter=maxiter,
                tol=1e-5,
                return_eigenvectors=True)
            print("----%s seconds for h_x = %s----" %
                  (time.time() - start_time, h_x))

            exc_eigenvalues[j] = exc_eigenvalue[0]
            first_excited_exc_energies[j] = exc_eigenvalue[1]
            for k in range(basis.M):
                exc_eigenstates[j][k] = exc_eigenstate[k, 0]
        return V_exc_csr, H_0_exc_csr, exc_eigenvalues, first_excited_exc_energies, exc_eigenstates

    # In[10]:

    # Calculate exact eigenvalues and eigenstates for range(h_x)
    V_exc, H_0_exc, exc_eigenvalues, first_excited__exc_energies, exc_eigenstates = exc_eigensystem(
        basis, h_x_range, lattice, Ising_energy_arr)

    print("----%s seconds ----" % (time.time() - start_time))

    # In[13]:

    # first and second derivatives of the ground-state energy with respect to h_x;
    # h_x_range is assumed uniformly spaced, so the grid spacing is
    # (final - init) / (num_steps - 1)
    final = h_x_range[-1]
    init = h_x_range[0]
    num_steps = len(h_x_range)
    step = (final - init) / float(num_steps - 1)
    first_derivative_exc_eigenvalues = np.gradient(exc_eigenvalues, step)
    second_derivative_exc_eigenvalues = np.gradient(
        first_derivative_exc_eigenvalues, step)

    # compute susceptibility

    # In[16]:

    chi_aa_matrix = np.zeros((len(h_x_range), lattice.N))
    for i, h_x in enumerate(h_x_range):
        for a in range(lattice.N):
            sigma_z = np.zeros(basis.M)
            for ket in range(basis.M):
                state = basis.state(ket)
                if state[a] == 1:
                    sigma_z[ket] += 1
                else:
                    sigma_z[ket] -= 1
            longitudinal_energy = spla.eigsh(H_0_exc - V_exc.multiply(h_x) -
                                             h_z * sparse.diags(sigma_z),
                                             k=1,
                                             which='SA',
                                             v0=v0,
                                             tol=1e-5,
                                             maxiter=maxiter,
                                             return_eigenvectors=False)[0]
            print("----%s seconds for h_x = %s----" %
                  (time.time() - start_time, h_x))
            chi_aa = 2. * abs(
                abs(exc_eigenvalues[i]) - abs(longitudinal_energy)) / (h_z**2)
            chi_aa_matrix[i, a] += chi_aa

    # In[18]:

    chi_ab_matrix = np.zeros((len(h_x_range), lattice.N, lattice.N))
    for i, h_x in enumerate(h_x_range):
        for a in range(lattice.N):
            sigma_z_a = np.zeros(basis.M)
            for ket in range(basis.M):
                state = basis.state(ket)
                if state[a] == 1:
                    sigma_z_a[ket] += 1
                else:
                    sigma_z_a[ket] -= 1
            for b in range(a, lattice.N, 1):
                sigma_z_b = np.zeros(basis.M)
                for ket in range(basis.M):
                    state = basis.state(ket)
                    if state[b] == 1:
                        sigma_z_b[ket] += 1
                    else:
                        sigma_z_b[ket] -= 1
                H = H_0_exc - V_exc.multiply(h_x) - (
                    sparse.diags(sigma_z_a) +
                    sparse.diags(sigma_z_b)).multiply(h_z)
                longitudinal_energy = spla.eigsh(H,
                                                 k=1,
                                                 which='SA',
                                                 v0=v0,
                                                 tol=1e-5,
                                                 maxiter=maxiter,
                                                 return_eigenvectors=False)[0]
                print("----%s seconds for h_x = %s----" %
                      (time.time() - start_time, h_x))
                chi_ab = abs(
                    abs(exc_eigenvalues[i]) - abs(longitudinal_energy)) / (
                        h_z**
                        2.) - 0.5 * (chi_aa_matrix[i, a] + chi_aa_matrix[i, b])
                chi_ab_matrix[i, a, b] += chi_ab
                chi_ab_matrix[i, b, a] += chi_ab
            # adding the diagonal elements
            for c in range(N):
                chi_ab_matrix[i, c, c] = chi_aa_matrix[i, c]

    chi_arr = np.zeros(len(h_x_range))
    for i, h_x in enumerate(h_x_range):
        chi_arr[i] += np.sum(chi_ab_matrix[i])
    print("----%s seconds ----" % (time.time() - start_time))

    # compute structure factor
    S_SG_arr = np.zeros(np.shape(h_x_range))
    for i, h_x in enumerate(h_x_range):
        psi0 = exc_eigenstates[i]
        for a in range(N):
            for b in range(N):
                sigma_z_a = np.zeros(basis.M)
                sigma_z_b = np.zeros(basis.M)
                for ket in range(basis.M):
                    state = basis.state(ket)
                    if state[a] == 1:
                        sigma_z_a[ket] += 1
                    else:
                        sigma_z_a[ket] -= 1
                for ket in range(basis.M):
                    state = basis.state(ket)
                    if state[b] == 1:
                        sigma_z_b[ket] += 1
                    else:
                        sigma_z_b[ket] -= 1
                S_ab = psi0 @ sparse.diags(sigma_z_a) @ sparse.diags(
                    sigma_z_b) @ psi0.T
                S_SG_arr[i] += S_ab**2.
    print("----%s seconds ----" % (time.time() - start_time))

    return N, h_x_range, exc_eigenvalues, first_excited__exc_energies, second_derivative_exc_eigenvalues, chi_arr, S_SG_arr
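
A hedged call sketch for lanczos(); the module-level imports the function relies on (numpy as np, scipy.sparse as sparse, scipy.sparse.linalg as spla, time, tfim, tfim_perturbation) are assumed to be in place:

import numpy as np

N, h_x, E0, E1, d2E0, chi, S_SG = lanczos(
    L=[3, 3],                              # 3x3 square lattice
    seed=0,                                # bond-disorder seed
    h_x_range=np.arange(0.02, 1.0, 0.02),  # transverse field sweep
    PBC=True,
    h_z=1e-3,                              # small longitudinal field for susceptibilities
    maxiter=10000)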
Example #6
def main():

    # Parse command line arguments
    ###################################
    parser = argparse.ArgumentParser(
        description=("Exact numerical diagonalization of "
                     "transverse field Ising Models of the form:\n"
                     "H = -\sum_{ij} J_{ij}\sigma^z_i \sigma^z_j"
                     "- h \sum_i \sigma^x_i"))
    parser.add_argument('lattice_specifier',
                        help=("Either: L (linear dimensions of the system)"
                              " or the filename base of matrix files"))
    parser.add_argument('-D',
                        type=int,
                        default=1,
                        help='Number of spatial dimensions')
    parser.add_argument('--obc',
                        action='store_true',
                        help='Open boundary conditions (default is PBC)')
    parser.add_argument('--h_min',
                        type=float,
                        default=0.0,
                        help='Minimum value of the transverse field')
    parser.add_argument('--h_max',
                        type=float,
                        default=4.0,
                        help='Maximum value of the transverse field')
    parser.add_argument('--dh',
                        type=float,
                        default=0.5,
                        help='Transverse field step size')
    parser.add_argument('-J',
                        type=float,
                        default=1.0,
                        help='Nearest neighbor Ising coupling')
    parser.add_argument('-k',
                        type=int,
                        default=3,
                        help='Number of eigenvalues to resolve')
    parser.add_argument('-o', default='output', help='output filename base')
    parser.add_argument('--full',
                        action='store_true',
                        help='Full (rather than Lanczos) diagonalization')
    parser.add_argument('--save_state',
                        action='store_true',
                        help='Save ground state to file')
    parser.add_argument('--init_v0',
                        action='store_true',
                        help='Start Lanczos with previous ground state')
    parser.add_argument('--load',
                        action='store_true',
                        help='Load matrices from file')
    parser.add_argument('--fidelity',
                        action='store_true',
                        help='Compute fidelities')
    parser.add_argument('--entropy',
                        action='store_true',
                        help='Compute entanglement entropies')
    parser.add_argument('--delta_h_F0',
                        type=float,
                        default=1E-4,
                        help='Initial \Delta h for fidelity')
    parser.add_argument('--N_F_steps',
                        type=int,
                        default=3,
                        help='Number of steps for fidelity')
    parser.add_argument('--overlap',
                        action='store_true',
                        help='Compute the overlap distribution')
    parser.add_argument('--N_ovlp_samples',
                        type=int,
                        default=10**4,
                        help='Number of samples of the overlap distribution')
    parser.add_argument(
        '--model',
        default=tfim.models[0],
        type=str,
        help=("Model type: " +
              "".join(["{}, ".format(mod_i) for mod_i in tfim.models])))

    args = parser.parse_args()
    ###################################

    # Load matrices from file
    ###################################
    load_matrices = args.load
    if load_matrices:
        loaded_params, JZZ, ZZ, Mz, Ms = tfim.load_diag_ME(
            args.lattice_specifier)
        Mx = tfim.load_Mx(args.lattice_specifier)
    ###################################

    # Set calculation Parameters
    ###################################
    out_filename = args.o + '.dat'

    # Transverse field
    h_arr = np.arange(args.h_min, args.h_max + args.dh / 2, args.dh)

    # Set model parameters from file or command line
    if load_matrices:
        L = loaded_params['L']
        D = len(L)
        PBC = loaded_params['PBC']
        J = loaded_params['J']
    else:
        D = args.D
        L = [int(args.lattice_specifier) for d in range(D)]
        PBC = not args.obc
        J = args.J

    # Check model type
    if args.model in tfim.models:
        model = args.model
        if model == "SK":
            Jij_filename = args.o + '_Jij.dat'
    else:
        print("\tInvalid model type: {}. Choose from: {}".format(
            args.model, tfim.models))
        exit()

    # Diagonalization flags
    k = args.k
    init_v0 = args.init_v0
    full_diag = args.full

    # Save state
    save_state = args.save_state
    if save_state:
        state_filename = args.o + '_psi0.dat'

    # Entropy
    entropy_on = args.entropy
    if entropy_on:
        Svn_filename = args.o + '_Svn.dat'
        ells = range(1, L[0])
        Svn = np.zeros(len(ells))

    # Fidelity
    fidelity_on = args.fidelity
    if fidelity_on:
        delta_h_F0 = args.delta_h_F0
        N_F_steps = args.N_F_steps
        dhf = np.flip(delta_h_F0 / (2**(np.arange(N_F_steps))), axis=0)
        F2 = np.zeros(dhf.shape)
        F2_filename = args.o + '_F2.dat'

    # Overlap
    overlap_on = args.overlap
    if overlap_on:
        N_ovlp_samples = args.N_ovlp_samples
        Pq_filename = args.o + '_Pq.dat'

    parameter_string = ("model = {}, D = {}, L = {}, PBC = {}, J = {},"
                        " k = {}".format(model, D, L, PBC, J, k))
    print('\tStarting tfim_diag using parameters:\t' + parameter_string)
    ###################################

    # Setup physical quantities
    ##################################
    # Quantities to write to the output file
    phys_keys = ['h', 'e0', 'Delta_1', 'Delta_2', 'Mx', 'Mz2', 'Cnn', 'Ms2']
    phys = {}  # Dictionary for values
    ##################################

    # Build lattice and basis
    ###################################
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)
    ###################################

    # Setup output data files
    ##################################
    width = 25
    precision = 16
    header_list = [tfim.phys_labels[key] for key in phys_keys]
    header = ''.join(
        ['{:>{width}}'.format(head, width=width) for head in header_list])
    out_file = open(out_filename, 'w')
    print("\tData will write to {}".format(out_filename))
    out_file.write('#\ttfim_diag parameters:\t' + parameter_string + '\n' +
                   '#' + header[1:] + '\n')

    if save_state:
        state_file = open(state_filename, 'w')
        print("\tGround state will write to {}".format(state_filename))
        state_file.write(
            "# tfim_diag parameters:\t{}\n".format(parameter_string) +
            "#{:>{width_h}}{:>{width_psi}}\n".format(
                'h', '\psi_0', width_h=(width - 1), width_psi=(width + 1)))

    if entropy_on:
        Svn_header = ("#{:>{width}}".format('h', width=(width - 1)) + ''.join([
            '{:{width}.{prec}e}'.format(
                ell, width=(width + 1), prec=(precision - 1)) for ell in ells
        ]))
        Svn_file = open(Svn_filename, 'w')
        print("\tEntropies will write to {}".format(Svn_filename))
        Svn_file.write('#\ttfim_diag parameters:\t' + parameter_string + '\n' +
                       '#' + Svn_header[1:] + '\n')

    if fidelity_on:
        F2_header = ("#{:>{width}}".format('h', width=(width - 1)) + ''.join([
            '{:{width}.{prec}e}'.format(
                dhfi, width=(width + 1), prec=(precision - 1)) for dhfi in dhf
        ]))
        F2_file = open(F2_filename, 'w')
        print("\tFidelities will write to {}".format(F2_filename))
        F2_file.write('#\ttfim_diag parameters:\t' + parameter_string + '\n' +
                      '#' + F2_header[1:] + '\n')

    if overlap_on:
        q = np.arange(-N, N + 1, 2) / float(N)
        Pq_header = ("#{:>{width}}".format('h', width=(width - 1)) + ''.join([
            '{:{width}.{prec}e}{:>{width}}'.format(
                qi, 'error', width=(width + 1), prec=(precision - 1))
            for qi in q
        ]))
        Pq_file = open(Pq_filename, 'w')
        print("\tOverlap distributions will write to {}".format(Pq_filename))
        Pq_file.write('#\ttfim_diag parameters:\t' + parameter_string + '\n' +
                      '#' + Pq_header[1:] + '\n')
    ##################################

    # Build Matrices
    ###################################
    if not load_matrices:
        print('\tBuilding matrices...')
        JZZ, ZZ = tfim.z_correlations_NN(lattice, basis, J)
        Mz, Ms = tfim.z_magnetizations(lattice, basis)
        Mx = tfim.build_Mx(lattice, basis)

        # Infinite range J_{ij} models
        if model in ["SK", "IR"]:
            if model == "IR":
                Jij = J * np.ones((N // 2, N)) / N  # integer shape required by np.ones
            elif model == "SK":
                Jij = tfim.Jij_instance(N, J)
                print("\tWriting Jij to {}".format(Jij_filename))
                np.savetxt(Jij_filename,
                           Jij,
                           header="N = {}, J = {}".format(N, J),
                           fmt='%{}.{}e'.format(width, precision - 1))
            JZZ = tfim.JZZ_SK(basis, Jij)
    ###################################

    # Main Diagonalization Loop
    #######################################################
    if full_diag:
        print("\tStarting full diagaonalization with h in ({},{}), "
              "dh = {}".format(h_arr[0], h_arr[-1], args.dh))
    else:
        print("\tStarting sparse diagaonalization with k={} and "
              "h in ({},{}), dh ={}".format(k, h_arr[0], h_arr[-1], args.dh))
    bar = progressbar.ProgressBar()
    v0 = None
    for h in bar(h_arr):

        H = -JZZ - h * Mx
        if full_diag:
            # Full diagonalize
            E, v = linalg.eigh(H.todense())
        else:
            # Sparse diagonalize
            E, v = spla.eigsh(H, k=k, which='SA', v0=v0)

        # Sort eigenvalues/vectors
        sort_order = np.argsort(E)
        E = E[sort_order]
        v = v[:, sort_order]

        # Grab Energies & ground state
        e0 = E[0] / N
        Delta = E - E[0]
        psi0 = v[:, 0]

        # Set starting vector for Lanczos:
        if not full_diag and init_v0:
            v0 = psi0

        # Compute expectation values
        ###################################
        Mx0 = np.real((psi0.conj().T).dot(Mx.dot(psi0))) / N
        Mz20 = np.real((psi0.conj().T).dot((Mz.power(2)).dot(psi0))) / (N**2)
        Cnn = np.real((psi0.conj().T).dot(ZZ.dot(psi0))) / lattice.N_links
        Ms20 = np.real((psi0.conj().T).dot((Ms.power(2)).dot(psi0))) / (N**2)
        ###################################

        # Compute fidelities
        ###################################
        if fidelity_on:
            for i, dhfi in enumerate(dhf):
                H_F = H - dhfi * Mx
                E_F, v_F = spla.eigsh(H_F, k=1, which='SA', v0=psi0)
                # Sort eigenvalues/vectors
                sort_order_F = np.argsort(E_F)
                E_F = E_F[sort_order_F]
                v_F = v_F[:, sort_order_F]
                F2[i] = (np.absolute(np.vdot(v_F[:, 0], psi0)))**2
        ###################################

        # Overlap distribution
        ###################################
        if overlap_on:
            Pq, Pq_err, q = basis.sample_overlap_distribution(
                psi0, N_ovlp_samples)
        ###################################

        # Entropy
        ###################################
        if entropy_on:
            for l, ell in enumerate(range(1, L[0])):
                A = range(0, ell)
                B = range(ell, L[0])
                S = tfim_rdm.svd(basis, A, B, psi0, False)
                Svn[l] = tfim_rdm.entropy(S)
        ###################################

        # Put physical values in phys dictionary
        ###################################
        phys['h'] = h
        phys['e0'] = e0
        phys['Delta_1'] = Delta[1]
        phys['Delta_2'] = Delta[2]
        phys['Mx'] = Mx0
        phys['Mz2'] = Mz20
        phys['Cnn'] = Cnn
        phys['Ms2'] = Ms20
        ###################################

        # Write data to output files
        ###################################
        data_list = [phys[key] for key in phys_keys]
        data_line = ''.join([
            '{:{width}.{prec}e}'.format(data, width=width, prec=precision)
            for data in data_list
        ])
        out_file.write(data_line + '\n')

        # Write psi0 to file
        if save_state:
            np.savetxt(state_file,
                       np.concatenate(([h], psi0)).reshape(
                           (1, psi0.shape[0] + 1)),
                       fmt='%{}.{}e'.format(width, precision - 1))

        # Write entropy to file
        if entropy_on:
            np.savetxt(Svn_file,
                       np.concatenate(([h], Svn)).reshape(
                           (1, Svn.shape[0] + 1)),
                       fmt='%{}.{}e'.format(width, precision - 1))

        # Write fidelities to file
        if fidelity_on:
            np.savetxt(F2_file,
                       np.concatenate(([h], F2)).reshape((1, F2.shape[0] + 1)),
                       fmt='%{}.{}e'.format(width, precision - 1))

        # Write overlap distribution to file
        if overlap_on:
            Pq_line = np.zeros(1 + 2 * len(Pq))
            Pq_line[0] = h
            Pq_line[1::2] = Pq
            Pq_line[2::2] = Pq_err
            np.savetxt(Pq_file,
                       Pq_line.reshape((1, Pq_line.shape[0])),
                       fmt='%{}.{}e'.format(width, precision - 1))

    #######################################################

    # Close files
    ###################################
    out_file.close()
    if save_state:
        state_file.close()
    if entropy_on:
        Svn_file.close()
    if fidelity_on:
        F2_file.close()
    if overlap_on:
        Pq_file.close()
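
A hedged invocation sketch; the script name tfim_diag.py is an assumption based on the parameter strings printed above:

# Sparse (Lanczos) diagonalization of a 16-site periodic chain, resolving k=3
# eigenvalues while sweeping h from 0 to 4 in steps of 0.5, with entanglement
# entropies and fidelities also computed:
#
#   python tfim_diag.py 16 -k 3 --entropy --fidelity -o chain16
#
# Output columns follow phys_keys: h, e0, Delta_1, Delta_2, Mx, Mz2, Cnn, Ms2.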
Example #7
def tfim_analysis(L,
                  Jij_seed,
                  perturbation_order,
                  h_x_range=np.arange(0, 0.005, 0.0001),
                  PBC=True,
                  J=1):
    # Initialize the output dictionary containing all the information we want to
    # know about a specific instance:
    #   - isEmpty
    #   - isWorking
    # both of which hold boolean (True/False) values

    #Initial set up
    info = {}
    # Configure the number of spins to the correct format for analysis
    L = [L]

    # Build lattice and basis
    ###################################
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)
    ###################################

    # construct random J matrix
    Jij = tfim.Jij_instance(N, J, "bimodal", Jij_seed)

    # List out all the spin_states, corresponding indices and energies
    Energies = -tfim.JZZ_SK_ME(basis, Jij)
    # for index in range(2 ** N):
    #     print(index, basis.state(index), Energies[index])
    GS_energy, GS_indices = tfim_perturbation.GS(Energies)

    # Specify perturbation order
    if perturbation_order == 3:
        analysis_func = tfim_perturbation.app_3_eigensystem_general_matrices
        H_app_3 = tfim_perturbation.H_app_3(basis, Jij, GS_indices, N,
                                            GS_energy)
        isEmpty = np.allclose(H_app_3,
                              np.zeros((len(GS_indices), len(GS_indices))))
    elif perturbation_order == 4:
        analysis_func = tfim_perturbation.app_4_eigensystem_general_matrices
        isEmpty = False
    # Check to see if the max order perturbative term is empty and store this information in "info"
    info['isEmpty'] = isEmpty

    # Calculate approximated eigenvalues and eigenstates for range(h_x)
    app_eigenvalues, app_eigenstates = analysis_func(GS_indices, GS_energy,
                                                     h_x_range, J, N, basis,
                                                     Jij)
    # Calculate exact eigenvalues and eigenstates for range(h_x)
    exc_eigenvalues, exc_eigenstates = tfim_perturbation.exc_eigensystem(
        basis, h_x_range, lattice, Energies)

    # Extract exact ground states
    exc_GS_eigenstates = np.zeros(
        (len(h_x_range), len(GS_indices), len(GS_indices)))

    for i in range(len(h_x_range)):
        for m, j in enumerate(GS_indices):
            for n, k in enumerate(GS_indices):
                exc_GS_eigenstates[i, m, n] = exc_eigenstates[i, j, n]

    # Extract exact ground energy
    reordered_app_eigenstates = np.zeros(
        [len(h_x_range), len(GS_indices),
         len(GS_indices)])
    epsilon = 1 * 10**(-6)

    for h_x_index in range(len(h_x_range)):
        if h_x_index < 2:
            reordered_app_eigenstates[h_x_index] = app_eigenstates[h_x_index]
        else:
            for k in range(len(GS_indices) // 2):
                fidelity_array = []
                for v1 in [
                        reordered_app_eigenstates[h_x_index - 1, :, 2 * k],
                        reordered_app_eigenstates[h_x_index - 1, :, 2 * k + 1]
                ]:
                    for v2 in [
                            app_eigenstates[h_x_index, :, 2 * k],
                            app_eigenstates[h_x_index, :, 2 * k + 1]
                    ]:
                        fidelity_array = np.append(
                            fidelity_array, tfim_perturbation.fidelity(v1, v2))
                if abs(fidelity_array[0] - max(fidelity_array)) < epsilon:
                    reordered_app_eigenstates[
                        h_x_index, :, 2 * k] = app_eigenstates[h_x_index, :,
                                                               2 * k]
                    reordered_app_eigenstates[h_x_index, :,
                                              2 * k + 1] = app_eigenstates[
                                                  h_x_index, :, 2 * k + 1]
                else:
                    reordered_app_eigenstates[
                        h_x_index, :, 2 * k] = app_eigenstates[h_x_index, :,
                                                               2 * k + 1]
                    reordered_app_eigenstates[h_x_index, :,
                                              2 * k + 1] = app_eigenstates[
                                                  h_x_index, :, 2 * k]

    reordered_exc_GS_eigenstates = np.zeros(
        [len(h_x_range), len(GS_indices),
         len(GS_indices)])
    epsilon = 1 * 10**(-12)

    for h_x_index in range(len(h_x_range)):
        if h_x_index < 2:
            reordered_exc_GS_eigenstates[h_x_index] = exc_GS_eigenstates[
                h_x_index]
        else:
            for k in range(len(GS_indices) // 2):
                fidelity_array = []
                for v1 in [
                        reordered_exc_GS_eigenstates[h_x_index - 1, :, 2 * k],
                        reordered_exc_GS_eigenstates[h_x_index - 1, :,
                                                     2 * k + 1]
                ]:
                    for v2 in [
                            exc_GS_eigenstates[h_x_index, :, 2 * k],
                            exc_GS_eigenstates[h_x_index, :, 2 * k + 1]
                    ]:
                        fidelity_array = np.append(
                            fidelity_array, tfim_perturbation.fidelity(v1, v2))
                if abs(fidelity_array[0] - max(fidelity_array)) < epsilon:
                    reordered_exc_GS_eigenstates[
                        h_x_index, :, 2 * k] = exc_GS_eigenstates[h_x_index, :,
                                                                  2 * k]
                    reordered_exc_GS_eigenstates[
                        h_x_index, :,
                        2 * k + 1] = exc_GS_eigenstates[h_x_index, :,
                                                        2 * k + 1]
                else:
                    reordered_exc_GS_eigenstates[
                        h_x_index, :, 2 * k] = exc_GS_eigenstates[h_x_index, :,
                                                                  2 * k + 1]
                    reordered_exc_GS_eigenstates[
                        h_x_index, :,
                        2 * k + 1] = exc_GS_eigenstates[h_x_index, :, 2 * k]
    # Calculate and plot energy errors
    corrected_exc_eigenvalues = np.zeros((len(GS_indices), len(h_x_range)))

    for i in range(len(GS_indices)):
        for j in range(len(h_x_range)):
            corrected_exc_eigenvalues[i, j] = exc_eigenvalues[i, j]

    error_array = np.absolute(corrected_exc_eigenvalues - app_eigenvalues)

    # Curve fit
    coeff_matrix = np.zeros((len(GS_indices), 2))
    for i in range(len(GS_indices)):
        pars, cov = curve_fit(f=power_law,
                              xdata=h_x_range,
                              ydata=error_array[i])
        coeff_matrix[i] = pars

    # Check to see if perturbation is working and store it in the info dictionary
    if not info['isEmpty']:
        judgment, error_classical_GS_index = isWorking(coeff_matrix,
                                                       perturbation_order)
        info['isWorking'] = bool(judgment)
        info['error state index'] = error_classical_GS_index
        info['error order'] = coeff_matrix[error_classical_GS_index, 1]
    else:
        info['isWorking'] = None
        info['error order'] = None
        info['error state index'] = None

    # return info dictionary
    return info, coeff_matrix[:, 1]
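
A hedged usage sketch for tfim_analysis(); power_law and isWorking are helpers referenced above and are assumed to be defined elsewhere in the same module:

# Analyze one 5-spin instance (seed 21) at 3rd perturbative order
info, error_orders = tfim_analysis(5, Jij_seed=21, perturbation_order=3)
print(info['isEmpty'], info['isWorking'], info['error state index'])
print(error_orders)  # fitted power-law exponents of the energy errors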
Example #8
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('yheight', type=int, help='Height of grid')
    parser.add_argument('xwidth', type=int, help='Width of grid')
    parser.add_argument('initial_seed', type=int, help='First Jij seed')
    parser.add_argument('seed_range', type=int, help='Number of seeds')
    args = parser.parse_args()

    PBC = True
    yheight = args.yheight
    xwidth = args.xwidth
    L = [yheight, xwidth]
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)
    initial = args.initial_seed
    num_seeds = args.seed_range
    center = (500, 375)
    ground_states = {}
    num_missing = 0

    different = []

    f = open("ground_states.txt", "w+")

    for seed in range(initial, initial + num_seeds):

        bonds = bond_list(seed, N, PBC, xwidth, yheight)

        Jij = make_Jij(N, bonds, lattice)

        coordList = spinCoords(center, xwidth, yheight)

        plaq = make_plaquettes(PBC, lattice, N, xwidth, yheight)

        f_plaq = frustrated(Jij, plaq)

        node_pairs = plaq_pairing(f_plaq, coordList, PBC, xwidth, yheight)

        init_ground = initial_ground(node_pairs, xwidth, yheight)

        p_pairings = init_ground[0]

        ground_distance = init_ground[1]

        edges = viable_edges(node_pairs, p_pairings, ground_distance, f_plaq,
                             xwidth, yheight)

        matchings = plaq_groups(edges, f_plaq, ground_distance)

        string_groups = add_all_strings(matchings, lattice, coordList)

        b_bonds = broken_bonds(string_groups, N, coordList, xwidth, yheight)

        true_ground = make_config(b_bonds, Jij, N, xwidth, lattice,
                                  string_groups)

        ground_config = true_ground[0]

        true_ground_strings = true_ground[1]

        number_ground_states = len(true_ground_strings)

        # If no ground states were found, the assumed total string length did not
        # produce any actual ground states; the strategy to fix that is to add to
        # the total string length until we find ground states (see the
        # commented-out block below).
        if number_ground_states == 0:
            found_ground = False
            inc = True

        else:
            found_ground = True  #This is the part where it works
            inc = False
            #print("Ground distance for something that's working : ", ground_distance)

        # Below is the piece that isn't working as well. It tends to return some
        # correct ground states, but has not worked fully yet. You can check things
        # with the tfim code, but it takes a while for that to return anything; I
        # wouldn't use tfim for anything too far beyond a 4x4 system, or it takes
        # too long.
        '''
        incremented = 0
        
        while found_ground == False:
            incremented += 1
            ground_distance += 1  # This adds to the total string distance that we are assuming will produce a ground state
            if ground_distance > 100:
                break   #Breaks out of the whole thing if the code has run too far
                
            edges = viable_edges(node_pairs, p_pairings, ground_distance, f_plaq, xwidth, yheight)
            
            if len(edges) != 0:
                
                matchings = plaq_groups(edges, f_plaq, ground_distance)
                
                string_groups = add_all_strings(matchings, lattice, coordList)

                b_bonds = broken_bonds(string_groups, N, coordList, xwidth, yheight)
                
                true_ground = make_config(b_bonds, Jij, N, xwidth, lattice, string_groups)
                
                ground_config = true_ground[0]
                
                true_ground_strings = true_ground[1]
                
                number_ground_states = len(true_ground_strings)
                
                if number_ground_states != 0:  # If it finds ground states, we can break out of the while loop
                    found_ground = True
        '''
        # If you are running with the incremented states, you would change this so
        # that incremented states can also get in (inc stands for incremented and
        # means that we increased the initial minimum assumed string length).
        if not inc:
            ground_states.update({seed: ground_config})
            ground_config = list(set(ground_config))
            ground_config.sort()

            # This whole piece will check whether you are returning ground states
            # that are correct by checking them against tfim. Again, don't run this
            # bit if you are doing much beyond a 4x4 system or tfim takes forever.
            Jij2 = Jij_convert(Jij, N)  #gives the Jij matrix in tfim's form
            Energies = -1 * tfim.JZZ_SK_ME(basis, Jij2)
            number_ground = num_ground_states(Energies)[0]
            states = num_ground_states(Energies)[1]
            states.sort()
            mod_states = convert_states(states, basis)
            mod_states.sort()
            print("From tfim: ", mod_states)

            if ground_config != mod_states:
                different.append(seed)
                print(
                    "Different!")  #Lets you know when this code isn't working

    print("Different seeds: ", different)
    f.write(json.dumps(ground_states))
    f.close()
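
A hedged invocation sketch; the helpers this main() calls (bond_list, make_Jij, spinCoords, make_plaquettes, frustrated, plaq_pairing, initial_ground, viable_edges, plaq_groups, add_all_strings, broken_bonds, make_config, Jij_convert, num_ground_states, convert_states) are assumed to live in the same module:

# Search for ground states of 4x4 periodic spin-glass instances over seeds 0..9,
# cross-checking each result against exact enumeration with tfim (slow beyond ~4x4):
#
#   python <this_script>.py 4 4 0 10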
Example #9
def main():

    # Parse command line arguments
    ###################################
    parser = argparse.ArgumentParser(
        description=("Builds matrices for "
                     "transverse field Ising Models of the form:\n"
                     "H = -\sum_{ij} J_{ij}\sigma^z_i \sigma^z_j"
                     "- h \sum_i \sigma^x_i"))
    parser.add_argument('L', type=int, help='Linear dimensions of the system')
    parser.add_argument('-D',
                        type=int,
                        default=1,
                        help='Number of spatial dimensions')
    parser.add_argument('--obc',
                        action='store_true',
                        help='Open boundary conditions (default is PBC)')
    parser.add_argument('-J',
                        type=float,
                        default=1.0,
                        help='Nearest neighbor Ising coupling')
    parser.add_argument('-o', default='output', help='output filename base')
    args = parser.parse_args()
    ###################################

    # Set calculation Parameters
    ###################################
    out_filename_base = args.o
    D = args.D
    L = [args.L for d in range(D)]
    PBC = not args.obc
    J = args.J
    parameter_string = "D = {}, L = {}, PBC = {}, J = {}".format(D, L, PBC, J)
    print('\tStarting tfim_build using parameters:\t' + parameter_string)
    ###################################

    # Set up file formatting
    ##################################
    width = 25
    precision = 16
    header = tfim.build_header(L, PBC, J)
    ##################################

    # Build lattice and basis
    ###################################
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)
    ###################################

    # Compute diagonal matrix elements
    ###################################
    print('\tBuilding diagonal matrices...')
    Mz_ME, Ms_ME = tfim.z_magnetizations_ME(lattice, basis)
    JZZ_ME, ZZ_ME = tfim.z_correlations_NN_ME(lattice, basis, J)

    # Write to disk
    columns = ['JZZ', 'ZZ', 'Mz', 'Ms']
    diagonal_arr = np.array([JZZ_ME, ZZ_ME, Mz_ME, Ms_ME]).T
    diag_filename = out_filename_base + tfim.diag_ME_suffix
    col_labels = ''.join([
        '{:>{width}}'.format(tfim.phys_labels[key], width=(width + 1))
        for key in columns
    ])[3:]
    print("\tWriting diagonal matrix elements to {}".format(diag_filename))
    np.savetxt(diag_filename,
               diagonal_arr,
               header=(header + col_labels),
               fmt='%{}.{}e'.format(width, precision - 1))
    ###################################

    # Compute off-diagonal matrix elements
    ###################################
    print('\tBuilding off-diagonal matrices...')
    Mx = tfim.build_Mx(lattice, basis)

    # Write to disk
    Mx_filename = out_filename_base + tfim.Mx_suffix
    print("\tWriting off-diagonal matrix to {}".format(Mx_filename))
    tfim.save_sparse_matrix(Mx_filename, Mx)
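
A hedged invocation sketch; the script name tfim_build.py is an assumption based on the printed parameter string:

# Build and save the diagonal matrix elements and the sparse Mx matrix
# for a 4x4 two-dimensional lattice with open boundary conditions:
#
#   python tfim_build.py 4 -D 2 --obc -o build_4x4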
Example #10
def main():
    # Parse command line arguments
    ###################################

    parser = argparse.ArgumentParser(description=(
        "Build approximate matrices using first and second order perturbation theory and return its eigenvalues and eigenstates"
    ))

    parser.add_argument('lattice_specifier',
                        help=("Linear dimensions of the system"))

    # Note: argparse's type=bool treats any non-empty string as True, so this flag
    # cannot actually be switched off from the command line.
    parser.add_argument('-PBC', type=bool, default=True, help="Specifying PBC")

    parser.add_argument('-J',
                        type=float,
                        default=1.0,
                        help='Nearest neighbor Ising coupling')

    parser.add_argument(
        '-seed',
        type=int,
        default=5,
        help="Specifying the seed for generating random Jij matrices")

    parser.add_argument('--h_min',
                        type=float,
                        default=0.0,
                        help='Minimum value of the transverse field')
    parser.add_argument('--h_max',
                        type=float,
                        default=0.1,
                        help='Maximum value of the transverse field')
    parser.add_argument('--dh',
                        type=float,
                        default=0.01,
                        help='Transverse field step size')

    parser.add_argument('-o', default='output', help='output filename base')

    parser.add_argument('-d',
                        default='Output_file',
                        help='output directory base')

    args = parser.parse_args()

    ###################################
    # Parameter specification
    out_filename_base = args.o

    # Transverse field
    h_x_range = np.arange(args.h_min, args.h_max + args.dh / 2, args.dh)

    L = [int(args.lattice_specifier)]
    PBC = args.PBC
    J = args.J
    seed = args.seed

    # Build lattice and basis
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)

    # Construct random J matrix
    Jij = tfim.Jij_instance(N, J, "bimodal", seed)
    ###################################

    Energies = -tfim.JZZ_SK_ME(basis, Jij)
    GS_energy, GS_indices = tfim_perturbation.GS(Energies)

    ###################################

    H_0 = tfim_perturbation.H_0(GS_energy, GS_indices)
    H_app_1 = tfim_perturbation.H_app_1(basis, GS_indices, N)
    H_app_2 = tfim_perturbation.H_app_2(basis, Jij, GS_indices, N, GS_energy)

    ###################################
    # Diagonalization loop over h_x_range on H_app

    app_eigenvalues = np.zeros((len(GS_indices), len(h_x_range)))
    app_eigenstates = np.zeros(
        (len(GS_indices), len(GS_indices), len(h_x_range)))

    for j, h_x in enumerate(h_x_range):
        H_app = tfim_perturbation.H_app(h_x, H_0, H_app_1, H_app_2, J)
        app_eigenvalue, app_eigenstate = np.linalg.eigh(H_app)
        for i in range(len(GS_indices)):
            app_eigenvalues[i][j] = app_eigenvalue[i]
            for k in range(len(GS_indices)):
                app_eigenstates[i][k][j] = app_eigenstate[i][k]

    ###################################
    # Make output directory

    Output = args.d

    os.makedirs(Output, exist_ok=True)  # tolerate an existing output directory
    os.chdir(Output)

    ###################################
    # Output Eigenvalue file

    out_filename_E = out_filename_base + '.dat'

    # Quantities to write to the eigenvalue output file

    phys_keys_E = []

    for i in range(len(app_eigenvalues)):
        eigenvalue_num = 'Eigenvalue ' + str(i + 1)
        phys_keys_E.append(eigenvalue_num)

    phys_keys_E.insert(0, 'h_x')
    phys_E = {}  # Dictionary for values

    # Setup output Eigenvalue data files

    parameter_string = ("L = {}, PBC = {}, J = {}".format(L, PBC, J))

    width = 25
    precision = 16
    header_list = phys_keys_E
    header = ''.join(
        ['{:>{width}}'.format(head, width=width) for head in header_list])
    out_eigenvalue_file = open(out_filename_E, 'w')
    print("\tData will write to {}".format(out_filename_E))
    out_eigenvalue_file.write('#\ttfim_diag parameters:\t' + parameter_string +
                              '\n' + '#' + header[1:] + '\n')

    # Put eigenvalues in phys_E dictionary

    for i, h_x in enumerate(h_x_range):
        phys_E['h_x'] = h_x
        for j, key in enumerate(phys_keys_E[1:]):
            phys_E[key] = app_eigenvalues[j, i]

        # Write eigenvalues to the output file
        data_list = [phys_E[key] for key in phys_keys_E]
        data_line = ''.join([
            '{:{width}.{prec}e}'.format(data, width=width, prec=precision)
            for data in data_list
        ])
        out_eigenvalue_file.write(data_line + '\n')

    # Close files

    out_eigenvalue_file.close()

    ###################################

    # Output Eigenstate files

    for file_num in range(len(app_eigenstates)):

        out_filename_V = out_filename_base + '_' + str(file_num) + '.dat'

        # Quantities to write Eigenstate output file

        phys_keys_V = []

        for i in range(len(app_eigenstates)):
            # use a separate name so the IsingBasis object `basis` is not shadowed
            basis_label = 'Basis ' + str(i + 1)
            phys_keys_V.append(basis_label)

        phys_keys_V.insert(0, 'h_x')
        phys_V = {}  # Dictionary for values

        # Setup output Eigenstate data files

        parameter_string = ("L = {}, PBC = {}, J = {}".format(L, PBC, J))

        width = 25
        precision = 16
        header_list = phys_keys_V
        header = ''.join(
            ['{:>{width}}'.format(head, width=width) for head in header_list])
        out_eigenstate_file = open(out_filename_V, 'w')
        print("\tData will write to {}".format(out_filename_V))
        out_eigenstate_file.write('#\ttfim_diag parameters:\t' +
                                  parameter_string + '\n' + '#' + header[1:] +
                                  '\n')

        # Put eigenstates in phys_V dictionary

        for i, h_x in enumerate(h_x_range):
            phys_V['h_x'] = h_x
            for j, key in enumerate(phys_keys_V[1:]):
                phys_V[key] = app_eigenstates[file_num][j, i]

            # Write eigenstates to the output file
            data_list = [phys_V[key] for key in phys_keys_V]
            data_line = ''.join([
                '{:{width}.{prec}e}'.format(data, width=width, prec=precision)
                for data in data_list
            ])
            out_eigenstate_file.write(data_line + '\n')

        # Close files

        out_eigenstate_file.close()

    #######################################################

    # Exit "Output" directory

    os.chdir("../")
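
A hedged invocation sketch for the perturbation-theory driver above:

# Second-order perturbative spectrum of an 8-site periodic chain with Jij seed 5,
# sweeping h_x from 0 to 0.1 in steps of 0.01; results are written into the
# directory given by -d (default Output_file):
#
#   python <this_script>.py 8 -seed 5 --h_min 0.0 --h_max 0.1 --dh 0.01 -o output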
Example #11
def main():

    #Add Arguments
    parser = argparse.ArgumentParser()

    parser.add_argument('-N', type=int, default=16, help='Number of spins')

    parser.add_argument('-C', type=int, default=1, help='J_Matrix configuration number')

    parser.add_argument('-S', type=int, default=500, help='Annealing rate in number of samples')

    parser.add_argument('--Seed', type=int, default=3, help="Random number seed")

    parser.add_argument('--EQ', type=int, default=0, help='Equilibration time for Monte Carlo sampling')

    parser.add_argument('--BStart', type=float, default=0.1, help="Starting temperature Beta")

    parser.add_argument('--BEnd', type=float, default=10, help="Ending temperature Beta")

    # store_true makes --Constant a proper on/off flag; argparse's type=bool would
    # treat any non-empty string (including "False") as True
    parser.add_argument('--Constant', action='store_true', help="Constant temperature sampling")

    args = parser.parse_args()



    #Parse arguments
    num_of_spins = args.N
    configuration = args.C
    Sample_Size = args.S
    seed = args.Seed
    J_BetaS = args.BStart
    J_BetaE = args.BEnd
    EQTime = args.EQ
    Constant = args.Constant




    #Load J Matrix configuration/ initiate basis
    J_Matrix = np.loadtxt("J_Matrices/J"+str(num_of_spins)+"/J_Matrix"+str(configuration)+"/"+str(num_of_spins)+"J_Matrix"+str(configuration)+".dat")
    lattice = tfim.Lattice([num_of_spins],True)
    basis = tfim.IsingBasis(lattice)


    #Deal with where to put files
    AnnDirectory = "J_Matrices/J"+str(num_of_spins)+"/J_Matrix"+str(configuration)+"/AnnRate"+str(Sample_Size)
    SeedDirectory = AnnDirectory + "/Seed" + str(seed)

    if not os.path.isdir(AnnDirectory):
        os.mkdir(AnnDirectory)
        os.mkdir(SeedDirectory)
    elif not os.path.isdir(SeedDirectory):
        os.mkdir(SeedDirectory)

    if Constant:
        RunFile = SeedDirectory + "/ConstMCSamples.dat"
        StatsFile = SeedDirectory + "/ConstMCStats.dat"
        SuccessFile = AnnDirectory + "/ConstSuccessRate.dat"
    else:
        RunFile = SeedDirectory + "/AnnMCSamples.dat"
        StatsFile = SeedDirectory + "/AnnMCStats.dat"
        SuccessFile = AnnDirectory + "/AnnSuccessRate.dat"



    minimum = np.loadtxt("J_Matrices/J"+str(num_of_spins)+"/J_Matrix"+str(configuration)+"/Minima.dat")[-1]

    success_numbers = np.array([[seed,0]])



    # SuccessFile is a running log; it's updated every time a different seed is run
    if os.path.exists(SuccessFile):
        seed_array = np.loadtxt(SuccessFile,ndmin = 2)


        duplicate = False
        i=0
        while i < seed_array.shape[0]:
            if seed_array[i][0] == seed:
                duplicate = True
                break
            i+=1
        if not duplicate:
            foundmin = ann.Monte_Carlo(lattice,basis,J_BetaS,J_BetaE,Sample_Size,seed,EQTime,J_Matrix,RunFile,StatsFile,Constant)
            if foundmin==minimum:
                success_numbers[0][1] = 1
            seed_array = np.concatenate((seed_array,success_numbers))

    else:
        foundmin = ann.Monte_Carlo(lattice,basis,J_BetaS,J_BetaE,Sample_Size,seed,EQTime,J_Matrix,RunFile,StatsFile,Constant)
        if foundmin==minimum:
            success_numbers[0][1] = 1
        seed_array = success_numbers



    header = str(np.sum(seed_array,axis=0)[1] / seed_array.shape[0])

    np.savetxt(SuccessFile,seed_array,delimiter = "\t", header = header)
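
A hedged invocation sketch; the ann module providing Monte_Carlo() and the J_Matrices/ directory layout (including Minima.dat) are assumed to exist as described by the paths above:

# One annealing run for the 16-spin instance, configuration 1, 500 samples per
# annealing step, random seed 3, beta swept from 0.1 to 10:
#
#   python <this_script>.py -N 16 -C 1 -S 500 --Seed 3 --BStart 0.1 --BEnd 10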