Example #1
def load_cgpt(*a):
    result = []
    r, metadata = cgpt.load(*a, gpt.default.is_verbose("io"))
    if r is None:
        raise gpt.LoadError()
    for gr in r:
        grid = gpt.grid(gr[1], eval("gpt." + gr[2]), eval("gpt." + gr[3]),
                        gr[0])
        result_grid = []
        otype = gpt.ot_matrix_su_n_fundamental_group(3)
        for t_obj, s_ot, s_pr in gr[4]:
            assert s_pr == gr[2]

            # only allow loading su3 gauge fields from cgpt, rest done in python
            # in the long run, replace *any* IO from cgpt with gpt code
            assert s_ot == "ot_mcolor3"

            l = gpt.lattice(grid, otype, [t_obj])
            l.metadata = metadata
            result_grid.append(l)
        result.append(result_grid)
    while len(result) == 1:
        result = result[0]
    return result
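
For context, a minimal usage sketch (the configuration path below is hypothetical): user code does not call load_cgpt directly but goes through g.load, which appears in several later examples and returns the gauge-link lattices assembled above.

import gpt as g

# hypothetical path; g.load dispatches to format-specific readers such as load_cgpt
U = g.load("/hypothetical/path/to/gauge.config")
g.message("Loaded field with dimensions", U[0].grid.fdimensions)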
Example #2
def separate_indices(x, st):
    pos = gpt.coordinates(x)
    cb = x.checkerboard()
    assert st is not None
    result_otype = st[-1]()
    if result_otype is None:
        return x
    ndim = x.otype.shape[st[0]]
    rank = len(st) - 1
    islice = [slice(None, None, None) for i in range(len(x.otype.shape))]
    ivec = [0] * rank
    result = {}
    for i in range(ndim ** rank):
        idx = i
        for j in range(rank):
            c = idx % ndim
            islice[st[j]] = c
            ivec[j] = c
            idx //= ndim
        v = gpt.lattice(x.grid, result_otype)
        v.checkerboard(cb)
        v[pos] = x[(pos,) + tuple(islice)]
        result[tuple(ivec)] = v
    return result
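
For orientation, a minimal sketch of how this helper is reached from user code (assuming, as the naming in Example #10 below suggests, that g.separate_spin and g.separate_color forward to separate_indices with the spin and color index positions, respectively):

import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("example")
msc = rng.cnormal(g.mspincolor(grid))

# dictionaries keyed by index tuples, as returned by separate_indices above
xs = g.separate_spin(msc)   # (s1, s2) -> color-matrix lattice per spin component
xc = g.separate_color(msc)  # (c1, c2) -> spin-matrix lattice per color component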
Example #3
File: io.py Project: spieseba/gpt
# Show metadata of field
# g.message("Metadata", U[0].metadata)
rng = g.random("test")
U = g.qcd.gauge.random(g.grid([8, 8, 8, 16], g.double), rng)

# create a sparse sub-domain and a sparse lattice S with 1% of points
sdomain = g.domain.sparse(
    U[0].grid,
    rng.choice(g.coordinates(U[0]), int(0.01 * U[0].grid.gsites / U[0].grid.Nprocessors)),
)

# test sparse domain
S = sdomain.lattice(U[0].otype)
sdomain.project(S, U[0])
U0prime = g.lattice(U[0])
U0prime[:] = 0
sdomain.promote(U0prime, S)
assert np.linalg.norm(U0prime[sdomain.local_coordinates] - U[0][sdomain.local_coordinates]) < 1e-14
s_slice = sdomain.slice(S, 3)

# save in default gpt format
g.save(
    f"{work_dir}/out",
    {
        "va\nl": [
            0,
            1,
            3,
            "tes\n\0t",
            3.123456789123456789,
Example #4
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Illustrate core concepts and features
#
import gpt as g
import numpy as np

# load configuration
rng = g.random("test")
U = g.qcd.gauge.random(g.grid([8, 8, 8, 16], g.double), rng)
V = rng.element(g.lattice(U[0]))
U_transformed = g.qcd.gauge.transformed(U, V)

# reference plaquette
P = g.qcd.gauge.plaquette(U)

# test rectangle calculation using parallel transport and copy_plan
R_1x1, R_2x1 = g.qcd.gauge.rectangle(U, [(1, 1), (2, 1)])
eps = abs(P - R_1x1)
g.message(f"Plaquette {P} versus 1x1 rectangle {R_1x1}: {eps}")
assert eps < 1e-13

# Test gauge invariance of plaquette
P_transformed = g.qcd.gauge.plaquette(U_transformed)
eps = abs(P - P_transformed)
g.message(
    f"Plaquette before {P} and after {P_transformed} gauge transformation: {eps}"
)
assert eps < 1e-13
Example #5
     cmath.exp(3j), cmath.exp(4j)],
}

# pf=g.params("~/gpt/tests/wilson.txt")
# print(pf)

# take the slow reference implementation of wilson clover (kappa = 1/2/(m0 + 4))
w_ref = g.qcd.fermion.reference.wilson_clover(U, p)

# and fast Grid version
w = g.qcd.fermion.wilson_clover(U, p, kappa=0.137)

# create point source
src = rng.cnormal(g.vspincolor(grid))

dst_ref, dst = g.lattice(src), g.lattice(src)

# correctness
dst_ref @= w_ref * src
dst @= w * src

eps = g.norm2(dst - dst_ref) / g.norm2(dst)
g.message("Test wilson versus reference:", eps)
assert eps < 1e-13

# now timing
t0 = g.time()
for i in range(100):
    w_ref(dst_ref, src)
t1 = g.time()
for i in range(100):
Example #6
    V = [g.matrix_su2_adjoint(grid) for x in U]
    for i in range(3):
        g.convert(
            V[i], U[i]
        )  # this used to be a separate function: fundamental_to_adjoint
        check_unitarity(V[i], eps_ref)
        check_representation(V[i], eps_ref)

    # check if fundamental_to_adjoint is a homomorphism
    eps = (g.norm2(V[2] - V[0] * V[1]) / g.norm2(V[2]))**0.5
    g.message(f"Test fundamental_to_adjoint is homomorphism: {eps}")
    assert eps < eps_ref

    a = [
        g.lattice(V[0].grid, g.ot_matrix_su_n_fundamental_algebra(2))
        for i in range(3)
    ]
    g.convert(a, U)

    V_c = []
    for i in range(3):
        # convert through canonical coordinates
        coor = a[i].otype.coordinates(a[i])
        a_adj = g.lattice(a[i].grid, g.ot_matrix_su_n_adjoint_algebra(2))
        a_adj.otype.coordinates(a_adj, coor)
        v = g.convert(a_adj, g.ot_matrix_su_n_adjoint_group(2))
        check_unitarity(v, eps_ref)
        check_representation(v, eps_ref)
        V_c.append(v)
Example #7
def create_links(A, fmat, basis, params):
    # NOTE: we expect the blocks in the basis vectors
    # to already be orthogonalized!
    # parameters
    make_hermitian = params["make_hermitian"]
    save_links = params["save_links"]
    assert not (make_hermitian and not save_links)

    # verbosity
    verbose = gpt.default.is_verbose("coarsen")

    # setup timings
    t = gpt.timer("coarsen")
    t("setup")

    # get grids
    f_grid = basis[0].grid
    c_grid = A[0].grid

    # directions/displacements we coarsen for
    dirs = [1, 2, 3, 4] if f_grid.nd == 5 else [0, 1, 2, 3]
    disp = +1
    dirdisps_full = list(zip(dirs * 2, [+1] * 4 + [-1] * 4))
    dirdisps_forward = list(zip(dirs, [disp] * 4))
    nhops = len(dirdisps_full)
    selflink = nhops

    # setup fields
    Mvr = [gpt.lattice(basis[0]) for i in range(nhops)]
    tmp = gpt.lattice(basis[0])
    oproj = gpt.vcomplex(c_grid, len(basis))
    selfproj = gpt.vcomplex(c_grid, len(basis))

    # setup masks
    onemask, blockevenmask, blockoddmask = (
        gpt.complex(f_grid),
        gpt.complex(f_grid),
        gpt.complex(f_grid),
    )
    dirmasks = [gpt.complex(f_grid) for p in range(nhops)]

    # auxiliary stuff needed for the masks
    t("masks")
    onemask[:] = 1.0
    coor = gpt.coordinates(blockevenmask)
    block = numpy.array(f_grid.ldimensions) / numpy.array(c_grid.ldimensions)
    block_cb = coor[:, :] // block[:]

    # fill masks for sites within even/odd blocks
    gpt.coordinate_mask(blockevenmask, numpy.sum(block_cb, axis=1) % 2 == 0)
    blockoddmask @= onemask - blockevenmask

    # fill masks for sites on borders of blocks
    dirmasks_forward_np = coor[:, :] % block[:] == block[:] - 1
    dirmasks_backward_np = coor[:, :] % block[:] == 0
    for mu in dirs:
        gpt.coordinate_mask(dirmasks[mu], dirmasks_forward_np[:, mu])
        gpt.coordinate_mask(dirmasks[mu + 4], dirmasks_backward_np[:, mu])

    # save applications of matrix and coarsening if possible
    dirdisps = dirdisps_forward if save_links else dirdisps_full

    # create block maps
    t("blockmap")
    dirbms = [
        gpt.block.map(c_grid, basis, dirmasks[p])
        for p, (mu, fb) in enumerate(dirdisps)
    ]
    fullbm = gpt.block.map(c_grid, basis)

    for i, vr in enumerate(basis):
        # apply directional hopping terms
        # this triggers len(dirdisps) comms -> TODO expose DhopdirAll from Grid
        # BUT problem with vector<Lattice<...>> in rhs
        t("apply_hop")
        for p, dirdisp in enumerate(dirdisps):
            fmat.Mdir(*dirdisp)(Mvr[p], vr)

        # coarsen directional terms + write to link
        for p, (mu, fb) in enumerate(dirdisps):
            t("coarsen_hop")
            dirbms[p].project(oproj, Mvr[p])

            t("copy_hop")
            A[p][:, :, :, :, :, i] = oproj[:]

        # fast diagonal term: apply full matrix to both block cbs separately and discard hops into other cb
        t("apply_self")
        tmp @= (blockevenmask * fmat * vr * blockevenmask +
                blockoddmask * fmat * vr * blockoddmask)

        # coarsen diagonal term
        t("coarsen_self")
        fullbm.project(selfproj, tmp)

        # write to self link
        t("copy_self")
        A[selflink][:, :, :, :, :, i] = selfproj[:]

        if verbose:
            gpt.message("coarsen: done with vector %d" % i)

    # communicate opposite links
    if save_links:
        t("comm")
        communicate_links(A, dirdisps_forward, make_hermitian)

    t()

    if verbose:
        gpt.message(t)
Example #8
# basis
n = 30
res = None
tmpf_prev = None
for dtype in [msc, vc12]:
    g.message(f"Data type {dtype.__name__}")
    basis = [dtype() for i in range(n)]
    rng = g.random("block_seed_string_13")
    rng.cnormal(basis)
    for i in range(2):
        g.message("Ortho step %d" % i)
        g.block.orthonormalize(coarse_grid, basis)

    # test coarse vector
    lcoarse = g.vcomplex(coarse_grid, n)
    rng.cnormal(lcoarse)

    # temporary fine and coarse vectors
    tmpf = g.lattice(basis[0])
    lcoarse2 = g.lattice(lcoarse)

    # coarse-to-fine-to-coarse
    g.block.promote(lcoarse, tmpf, basis)
    g.block.project(lcoarse2, tmpf, basis)

    # report error
    err2 = g.norm2(lcoarse - lcoarse2) / g.norm2(lcoarse)
    g.message(err2)
    assert err2 < 1e-12
Example #9
    def __call__(self, mat, src, ckpt=None):

        # verbosity
        verbose = g.default.is_verbose("irl")

        # checkpointer
        if ckpt is None:
            ckpt = g.checkpointer_none()
        ckpt.grid = src.grid
        self.ckpt = ckpt

        # first approximate largest eigenvalue
        pit = g.algorithms.eigen.power_iteration(eps=0.05,
                                                 maxiter=10,
                                                 real=True)
        lambda_max = pit(mat, src)[0]

        # parameters
        Nm = self.params["Nm"]
        Nk = self.params["Nk"]
        Nstop = self.params["Nstop"]
        assert Nm >= Nk and Nstop <= Nk

        # tensors
        dtype = np.float64
        lme = np.empty((Nm, ), dtype)
        lme2 = np.empty((Nm, ), dtype)
        ev = np.empty((Nm, ), dtype)
        ev2 = np.empty((Nm, ), dtype)
        ev2_copy = np.empty((Nm, ), dtype)

        # fields
        f = g.lattice(src)
        v = g.lattice(src)
        evec = [g.lattice(src) for i in range(Nm)]

        # advice memory storage
        if self.params["advise"] is not None:
            g.advise(evec, self.params["advise"])

        # scalars
        k1 = 1
        k2 = Nk
        beta_k = 0.0

        # set initial vector
        evec[0] @= src / g.norm2(src)**0.5

        # initial Nk steps
        for k in range(Nk):
            self.step(mat, ev, lme, evec, f, Nm, k)

        # restarting loop
        for it in range(self.params["maxiter"]):
            if verbose:
                g.message("Restart iteration %d" % it)
            for k in range(Nk, Nm):
                self.step(mat, ev, lme, evec, f, Nm, k)
            f *= lme[Nm - 1]

            # eigenvalues
            for k in range(Nm):
                ev2[k] = ev[k + k1 - 1]
                lme2[k] = lme[k + k1 - 1]

            # diagonalize
            t0 = g.time()
            Qt = np.identity(Nm, dtype)
            self.diagonalize(ev2, lme2, Nm, Qt)
            t1 = g.time()

            if verbose:
                g.message("Diagonalization took %g s" % (t1 - t0))

            # sort
            ev2_copy = ev2.copy()
            ev2 = list(reversed(sorted(ev2)))

            # implicitly shifted QR transformations
            Qt = np.identity(Nm, dtype)
            t0 = g.time()
            for ip in range(k2, Nm):
                g.qr_decomposition(ev, lme, Nm, Nm, Qt, ev2[ip], k1, Nm)
            t1 = g.time()

            if verbose:
                g.message("QR took %g s" % (t1 - t0))

            # rotate
            t0 = g.time()
            g.rotate(evec, Qt, k1 - 1, k2 + 1, 0, Nm)
            t1 = g.time()

            if verbose:
                g.message("Basis rotation took %g s" % (t1 - t0))

            # compression
            f *= Qt[k2 - 1, Nm - 1]
            f += lme[k2 - 1] * evec[k2]
            beta_k = g.norm2(f)**0.5
            betar = 1.0 / beta_k
            evec[k2] @= betar * f
            lme[k2 - 1] = beta_k

            if verbose:
                g.message("beta_k = ", beta_k)

            # convergence test
            if it >= self.params["Nminres"]:
                if verbose:
                    g.message("Rotation to test convergence")

                # diagonalize
                for k in range(Nm):
                    ev2[k] = ev[k]
                    lme2[k] = lme[k]
                Qt = np.identity(Nm, dtype)

                t0 = g.time()
                self.diagonalize(ev2, lme2, Nk, Qt)
                t1 = g.time()

                if verbose:
                    g.message("Diagonalization took %g s" % (t1 - t0))

                B = g.copy(evec[0])

                allconv = True
                if beta_k >= self.params["betastp"]:
                    jj = 1
                    while jj <= Nstop:
                        j = Nstop - jj
                        g.linear_combination(B, evec[0:Nk], Qt[j, 0:Nk])
                        B *= 1.0 / g.norm2(B)**0.5
                        if not ckpt.load(v):
                            mat(v, B)
                            ckpt.save(v)
                        ev_test = g.inner_product(B, v).real
                        eps2 = g.norm2(v - ev_test * B) / lambda_max**2.0
                        if verbose:
                            g.message("%-65s %-45s %-50s" % (
                                "ev[ %d ] = %s" % (j, ev2_copy[j]),
                                "<B|M|B> = %s" % (ev_test),
                                "|M B - ev B|^2 / ev_max^2 = %s" % (eps2),
                            ))
                        if eps2 > self.params["resid"]:
                            allconv = False
                        if jj == Nstop:
                            break
                        jj = min([Nstop, 2 * jj])

                if allconv:
                    if verbose:
                        g.message("Converged in %d iterations" % it)
                    break

        t0 = g.time()
        g.rotate(evec, Qt, 0, Nstop, 0, Nk)
        t1 = g.time()

        if verbose:
            g.message("Final basis rotation took %g s" % (t1 - t0))

        return (evec[0:Nstop], ev2_copy[0:Nstop])
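
Examples #13 and #18 below show how this solver is invoked once constructed. A hypothetical construction sketch (the parameter names are taken from the self.params accesses above; the class path g.algorithms.eigen.irl is an assumption):

import gpt as g

# hypothetical parameter choices satisfying Nm >= Nk and Nstop <= Nk
irl = g.algorithms.eigen.irl(
    {
        "Nm": 60,
        "Nk": 40,
        "Nstop": 30,
        "maxiter": 20,
        "Nminres": 0,
        "resid": 1e-8,
        "betastp": 0.0,
        "advise": None,
    }
)
# then, as in Examples #13/#18:  evec, evals = irl(matrix, start_vector)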
Example #10
for s1 in range(4):
    for s2 in range(4):
        eps = np.linalg.norm(
            msc[0, 0, 0, 0].array[s1, s2, :, :] - xs[s1, s2][0, 0, 0, 0].array
        )
        assert eps < 1e-13

for c1 in range(3):
    for c2 in range(3):
        eps = np.linalg.norm(
            msc[0, 0, 0, 0].array[:, :, c1, c2] - xc[c1, c2][0, 0, 0, 0].array
        )
        assert eps < 1e-13


msc2 = g.lattice(msc)

g.merge_spin(msc2, xs)
assert g.norm2(msc2 - msc) < 1e-13

g.merge_color(msc2, xc)
assert g.norm2(msc2 - msc) < 1e-13

assert (
    g.norm2(g.separate_color(xs[1, 2])[2, 0] - g.separate_spin(xc[2, 0])[1, 2]) < 1e-13
)


################################################################################
# Setup lattices
################################################################################
Example #11
    grid = g.grid(g.default.get_ivec("--grid", [16, 16, 16, 32], 4), precision)
    N = 10
    Nwarmup = 5
    g.message(f"""
Matrix Multiply Benchmark with
    fdimensions  : {grid.fdimensions}
    precision    : {precision.__name__}
""")

    # Source and destination
    for tp in [
            g.ot_matrix_color(3),
            g.ot_matrix_spin(4),
            g.ot_matrix_spin_color(4, 3)
    ]:
        one = g.lattice(grid, tp)
        two = g.lattice(grid, tp)
        three = g.lattice(grid, tp)
        rng.cnormal([one, two])

        # Rank inner product
        nbytes = 3.0 * one.global_bytes() * N

        # Time
        dt = 0.0
        for it in range(N + Nwarmup):
            if it >= Nwarmup:
                dt -= g.time()
            g.eval(three, one * two)
            if it >= Nwarmup:
                dt += g.time()
Example #12
for precision in [g.single, g.double]:
    grid = g.grid(g.default.get_ivec("--grid", [16, 16, 16, 32], 4), precision)
    N = 100
    Nwarmup = 5
    g.message(
        f"""
Inner Product Benchmark with
    fdimensions  : {grid.fdimensions}
    precision    : {precision.__name__}
"""
    )

    # Source and destination
    for tp in [g.ot_singlet(), g.ot_vector_spin_color(4, 3), g.ot_vector_singlet(12)]:
        for n in [1, 4]:
            one = [g.lattice(grid, tp) for i in range(n)]
            two = [g.lattice(grid, tp) for i in range(n)]
            rng.cnormal([one, two])

            # Rank inner product
            nbytes = (one[0].global_bytes() + two[0].global_bytes()) * N * n * n
            for use_accelerator, compute_name, access in [
                (False, "host", access_host),
                (True, "accelerator", access_accelerator),
            ]:

                # Time
                dt = 0.0
                cgpt.timer_begin()
                for it in range(N + Nwarmup):
                    access(one)
Example #13
# now define coarse-grid operator
g.message("Test precision of promote-project chain: %g" %
          (g.norm2(cstart - b.project * b.promote * cstart) / g.norm2(cstart)))

g.mem_report()

try:
    cevec, cev = g.load("cevec", {"grids": cgrid})
except g.LoadError:
    cevec, cev = irl(cop, cstart, params["checkpointer"])
    g.save("cevec", (cevec, cev))

# smoother
smoother = params["smoother"](q.NDagN)
nsmoother = params["nsmoother"]
v_fine = g.lattice(basis[0])
v_fine_smooth = g.lattice(basis[0])
try:
    ev3 = g.load("ev3")
except g.LoadError:
    ev3 = [0.0] * len(cevec)
    for i, v in enumerate(cevec):
        v_fine @= b.promote * v
        for j in range(nsmoother):
            v_fine_smooth @= smoother * v_fine
            v_fine @= v_fine_smooth / g.norm2(v_fine_smooth)**0.5
        ev_smooth = g.algorithms.eigen.evals(q.NDagN, [v_fine],
                                             check_eps2=1e-2,
                                             real=True)
        ev3[i] = ev_smooth[0]
        g.message("Eigenvalue %d = %.15g" % (i, ev3[i]))
Example #14
def gpt_object(first, ot):
    if type(first) == gpt.grid:
        return gpt.lattice(first, ot)
    return gpt.tensor(numpy.array(first, dtype=numpy.complex128), ot)
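
A short sketch of the two branches (calling the helper directly is for illustration only and assumes it is in scope; in gpt it presumably sits behind constructors such as g.mspincolor or g.vspincolor):

import numpy
import gpt

grid = gpt.grid([8, 8, 8, 16], gpt.double)

# grid argument -> a lattice of the requested object type
field = gpt_object(grid, gpt.ot_matrix_color(3))

# array-like argument -> a single site tensor of the same object type
site = gpt_object(numpy.identity(3), gpt.ot_matrix_color(3))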
Example #15
    def __call__(self, mat, src, ckpt=None):

        # verbosity
        verbose = g.default.is_verbose("irl")

        # checkpointer
        if ckpt is None:
            ckpt = g.checkpointer_none()
        ckpt.grid = src.grid
        self.ckpt = ckpt

        # first approximate largest eigenvalue
        pit = g.algorithms.eigen.power_iteration(eps=0.05, maxiter=10, real=True)
        lambda_max = pit(mat, src)[0]

        # parameters
        Nm = self.params["Nm"]
        Nu = self.params["Nu"]
        Nk = self.params["Nk"]
        Nstop = self.params["Nstop"]
        Np = Nm - Nk
        MaxIter = self.params["maxiter"]
        Np /= MaxIter
        assert Nm >= Nk and Nstop <= Nk
        print("Nm =", Nm, "Nu =", Nu, "Nk =", Nk)

        # tensors
        dtype = np.float64
        ctype = np.complex128
         
        lme = np.zeros((Nu, Nm), ctype)
        lmd = np.zeros((Nu, Nm), ctype)
        lme2 = np.zeros((Nu, Nm), ctype)
        lmd2 = np.empty((Nu, Nm), ctype)
        Qt = np.zeros((Nm, Nm), ctype)
        Q = np.zeros((Nm, Nm), ctype)
        ev = np.empty((Nm,), dtype)
        ev2_copy = np.empty((Nm,), dtype)

        # fields
        f = g.lattice(src)
        v = g.lattice(src)
        evec = [g.lattice(src) for i in range(Nm)]
        w = [g.lattice(src) for i in range(Nu)]
        w_copy = [g.lattice(src) for i in range(Nu)]

        # advice memory storage
        if self.params["advise"] is not None:
            g.advise(evec, self.params["advise"])

        # scalars
        k1 = 1
        k2 = Nk
        beta_k = 0.0

        rng = g.random("test")

        # set initial vectors: random, orthogonalized against the previous ones
        for i in range(Nu):
            rng.zn(w[i])
            if i > 0:
                g.orthogonalize(w[i], evec[0:i])
            evec[i] = g.copy(w[i])
            evec[i] *= 1.0 / g.norm2(evec[i]) ** 0.5
            g.message("norm(evec[%d]) = %e" % (i, g.norm2(evec[i])))
            if i > 0:
                for j in range(i):
                    ip = g.innerProduct(evec[j], w[i])
                    if np.abs(ip) > 1e-6:
                        g.message("inner(evec[%d], w[%d]) = %e %e" % (j, i, ip.real, ip.imag))

        # initial Nk steps
        Nblock_k = int(Nk / Nu)
        for b in range(Nblock_k):
            self.blockStep(mat, lmd, lme, evec, w, w_copy, Nm, b, Nu)

        Nblock_p = int(Np / Nu)
        # restarting loop
        for it in range(MaxIter):
            if verbose:
                g.message("Restart iteration %d" % it)

            Nblock_l = Nblock_k + it * Nblock_p
            Nblock_r = Nblock_l + Nblock_p
            Nl = Nblock_l * Nu
            Nr = Nblock_r * Nu
            ev2 = np.empty((Nr,), dtype)

            for b in range(Nblock_l, Nblock_r):
                self.blockStep(mat, lmd, lme, evec, w, w_copy, Nm, b, Nu)

            for u in range(Nu):
                for k in range(Nr):
                    lmd2[u, k] = lmd[u, k]
                    lme2[u, k] = lme[u, k]

            Qt = np.identity(Nr, ctype)

            # diagonalize
            t0 = g.time()
#            self.diagonalize(ev2, lme2, Nm, Qt)
            self.diagonalize(ev2, lmd2, lme2, Nu, Nr, Qt)
#    def diagonalize(self, eval, lmd, lme, Nu, Nk, Nm, Qt):
            t1 = g.time()

            if verbose:
                g.message("Diagonalization took %g s" % (t1 - t0))

            # sort
            ev2_copy = ev2.copy()
            ev2 = list(reversed(sorted(ev2)))

            for i in range(Nr):
                g.message("Rval[%d]= %e"%(i,ev2[i]))

            # rotate
#            t0 = g.time()
#            g.rotate(evec, Qt, k1 - 1, k2 + 1, 0, Nm)
#            t1 = g.time()

#            if verbose:
#                g.message("Basis rotation took %g s" % (t1 - t0))

            # convergence test
            if it >= self.params["Nminres"]:
                if verbose:
                    g.message("Rotation to test convergence")

                # diagonalize
                for k in range(Nr):
                    ev2[k] = ev[k]
            #        lme2[k] = lme[k]
                for u in range(Nu):
                    for k in range(Nr):
                        lmd2[u, k] = lmd[u, k]
                        lme2[u, k] = lme[u, k]
                Qt = np.identity(Nm, ctype)

                t0 = g.time()
#                self.diagonalize(ev2, lme2, Nk, Qt)
                self.diagonalize(ev2, lmd2, lme2, Nu, Nr, Qt)
                t1 = g.time()

                if verbose:
                    g.message("Diagonalization took %g s" % (t1 - t0))

                B = g.copy(evec[0])

                allconv = True
                if beta_k >= self.params["betastp"]:
                    jj = 1
                    while jj <= Nstop:
                        j = Nstop - jj
                        g.linear_combination(B, evec[0:Nr], Qt[j, 0:Nr])
                        g.message("norm=%e"%(g.norm2(B)))
                        B *= 1.0 / g.norm2(B) ** 0.5
                        if not ckpt.load(v):
                            mat(v, B)
                            ckpt.save(v)
                        ev_test = g.innerProduct(B, v).real
                        eps2 = g.norm2(v - ev_test * B) / lambda_max ** 2.0
                        if verbose:
                            g.message(
                                "%-65s %-45s %-50s"
                                % (
                                    "ev[ %d ] = %s" % (j, ev2_copy[j]),
                                    "<B|M|B> = %s" % (ev_test),
                                    "|M B - ev B|^2 / ev_max^2 = %s" % (eps2),
                                )
                            )
                        if eps2 > self.params["resid"]:
                            allconv = False
                        if jj == Nstop:
                            break
                        jj = min([Nstop, 2 * jj])

                if allconv:
                    if verbose:
                        g.message("Converged in %d iterations" % it)
                    break

        t0 = g.time()
        g.rotate(evec, Qt, 0, Nstop, 0, Nk)
        t1 = g.time()

        if verbose:
            g.message("Final basis rotation took %g s" % (t1 - t0))

        return (evec[0:Nstop], ev2_copy[0:Nstop])
Example #16
################################################################################
# Test mview
################################################################################
c = g.coordinates(l_dp)
x = l_dp[c]
mv = g.mview(x)
assert mv.itemsize == 1 and mv.shape[0] == len(mv)
assert sys.getrefcount(x) == 3
del mv
assert sys.getrefcount(x) == 2

################################################################################
# Test assignments
################################################################################
pos = l_dp.mview_coordinates()
lhs = g.lattice(l_dp)


def assign_copy():
    g.copy(lhs, l_dp)


def assign_pos():
    lhs[pos] = l_dp[pos]


def assign_pos_view():
    lhs[pos] = l_dp.view[pos]


for method in [assign_copy, assign_pos, assign_pos_view]:
Example #17
    def read_lattice(self, a):
        g_desc = a[0]
        cv_desc = a[1]
        l_desc = a[2]
        filepos = [int(x) for x in a[3:]]

        # first find grid
        if g_desc not in self.params["grids"]:
            self.params["grids"][g_desc] = gpt.grid_from_description(g_desc)
        g = self.params["grids"][g_desc]

        # create a cartesian view and lattice to load
        l = gpt.lattice(g, l_desc)
        cv0 = gpt.cartesian_view(-1, cv_desc, g.fdimensions, g.cb,
                                 l.checkerboard())

        # find tasks for my node
        views_for_node = self.views_for_node(cv0, g)

        # performance
        dt_distr, dt_crc, dt_read = 0.0, 0.0, 0.0
        szGB = 0.0
        g.barrier()
        t0 = gpt.time()

        # need to load all views
        for xk, iview in enumerate(views_for_node):
            g.barrier()
            dt_read -= gpt.time()

            f, pos = self.open_view(xk, iview, False, cv_desc, g.fdimensions,
                                    g.cb, l.checkerboard())

            cache_key = f"{a[0:3]}_{g.obj}_{iview}_read"
            if cache_key not in self.cache:
                self.cache[cache_key] = {}

            if f is not None:
                f.seek(filepos[iview], 0)
                ntag = int.from_bytes(f.read(4), byteorder="little")
                f.read(ntag)  # not needed if index is present
                crc_exp = int.from_bytes(f.read(4), byteorder="little")
                nd = int.from_bytes(f.read(4), byteorder="little")
                f.read(8 * nd)  # not needed if index is present
                sz = int.from_bytes(f.read(8), byteorder="little")
                data = memoryview(f.read(sz))
                dt_crc -= gpt.time()
                crc_comp = gpt.crc32(data)
                dt_crc += gpt.time()
                assert crc_comp == crc_exp
                sys.stdout.flush()
                szGB += len(data) / 1024.0**3.0
            else:
                assert len(pos) == 0
                data = None

            g.barrier()
            dt_read += gpt.time()
            dt_distr -= gpt.time()
            l[pos, self.cache[cache_key]] = data
            g.barrier()
            dt_distr += gpt.time()

        g.barrier()
        t1 = gpt.time()

        szGB = g.globalsum(szGB)
        if self.verbose and dt_crc != 0.0:
            gpt.message(
                "Read %g GB at %g GB/s (%g GB/s for distribution, %g GB/s for reading + checksum, %g GB/s for checksum, %d views per node)"
                % (
                    szGB,
                    szGB / (t1 - t0),
                    szGB / dt_distr,
                    szGB / dt_read,
                    szGB / dt_crc,
                    len(views_for_node),
                ))

        return l
Example #18
# start vector
cstart = g.vcomplex(cgrid, nbasis)
cstart[:] = g.vcomplex([1] * nbasis, nbasis)

g.mem_report()

# basis
northo = params["northo"]
for i in range(northo):
    g.message("Orthonormalization round %d" % i)
    g.block.orthonormalize(cgrid, basis)

g.mem_report()

# now define coarse-grid operator
ftmp = g.lattice(basis[0])
ctmp = g.lattice(cstart)
g.block.promote(cstart, ftmp, basis)
g.block.project(ctmp, ftmp, basis)
g.message(
    "Test precision of promote-project chain: %g"
    % (g.norm2(cstart - ctmp) / g.norm2(cstart))
)

g.mem_report()

try:
    cevec, cev = g.load("cevec", {"grids": cgrid})
except g.LoadError:
    cevec, cev = irl(cop, cstart, params["checkpointer"])
    g.save("cevec", (cevec, cev))
Example #19
        def inv_lvl(psi, src, lvl):
            # assertions
            assert psi != src

            # neighbors
            nc_lvl = s.nc_lvl[lvl]

            # aliases
            t = self.t[lvl]
            r = self.r[lvl]
            pp = self.print_prefix[lvl]
            r_c = self.r[nc_lvl] if lvl != s.coarsest else None
            e_c = self.e[nc_lvl] if lvl != s.coarsest else None
            mat_c = s.mat[nc_lvl] if lvl != s.coarsest else None
            mat = s.mat[lvl]
            bm = s.blockmap[lvl]
            slv_s = self.smooth_solver[lvl] if lvl != s.coarsest else None
            slv_w = self.wrapper_solver[lvl] if lvl <= s.coarsest - 2 else None
            slv_c = self.coarsest_solver if lvl == s.coarsest else None

            # start clocks
            t("misc")

            if self.verbose:
                g.message("%s starting inversion routine: psi = %g, src = %g" %
                          (pp, g.norm2(psi), g.norm2(src)))

            inputnorm = g.norm2(src)

            if lvl == s.coarsest:
                t("invert")
                g.default.push_verbose(get_slv_name(slv_c), False)
                slv_c(mat)(psi, src)
                g.default.pop_verbose()
                self.history[lvl]["coarsest"].append(get_slv_history(slv_c))
            else:
                t("copy")
                r @= src

                # fine to coarse
                t("to_coarser")
                bm.project(r_c, r)

                if self.verbose:
                    t("output")
                    g.message("%s done calling f2c: r_c = %g, r = %g" %
                              (pp, g.norm2(r_c), g.norm2(r)))

                # call method on next level
                t("on_coarser")
                e_c[:] = 0.0
                if slv_w is not None and lvl < s.coarsest - 1:

                    def prec(matrix):
                        def ignore_mat(dst_p, src_p):
                            inv_lvl(dst_p, src_p, nc_lvl)

                        return ignore_mat

                    g.default.push_verbose(get_slv_name(slv_w), False)
                    slv_w.modified(prec=prec)(mat_c)(e_c, r_c)
                    g.default.pop_verbose()
                    self.history[lvl]["wrapper"].append(get_slv_history(slv_w))
                else:
                    inv_lvl(e_c, r_c, nc_lvl)

                if self.verbose:
                    t("output")
                    g.message(
                        "%s done calling coarser level: e_c = %g, r_c = %g" %
                        (pp, g.norm2(e_c), g.norm2(r_c)))

                # coarse to fine
                t("from_coarser")
                bm.promote(psi, e_c)

                if self.verbose:
                    t("output")
                    g.message("%s done calling c2f: psi = %g, e_c = %g" %
                              (pp, g.norm2(psi), g.norm2(e_c)))

                t("residual")
                tmp = g.lattice(src)
                mat(tmp, psi)
                tmp @= src - tmp
                res_cgc = (g.norm2(tmp) / inputnorm)**0.5

                # smooth
                t("smooth")
                g.default.push_verbose(get_slv_name(slv_s), False)
                slv_s(mat)(psi, src)
                g.default.pop_verbose()
                self.history[lvl]["smooth"].append(get_slv_history(slv_s))

                t("residual")
                mat(tmp, psi)
                tmp @= src - tmp
                res_smooth = (g.norm2(tmp) / inputnorm)**0.5

                if self.verbose:
                    t("output")
                    g.message(
                        "%s done smoothing: input norm = %g, coarse residual = %g, smooth residual = %g"
                        % (pp, inputnorm, res_cgc, res_smooth))

            t()

            if self.verbose:
                t("output")
                g.message("%s ending inversion routine: psi = %g, src = %g" %
                          (pp, g.norm2(psi), g.norm2(src)))
                t()
Example #20
# define coarse-grid operator
cop = b.coarse_operator(c(w.Mpc))
eps2 = g.norm2(cop * cstart -
               b.project * c(w.Mpc) * b.promote * cstart) / g.norm2(cstart)
g.message(f"Test coarse-grid promote/project cycle: {eps2}")
assert eps2 < 1e-13

# coarse-grid lanczos
cevec, cev = irl(cop, cstart)

# smoothened evals
smoother = inv.cg({"eps": 1e-6, "maxiter": 10})(w.Mpc)
smoothed_evals = []
g.default.push_verbose("cg", False)
tmpf = g.lattice(basis[0])
for i, cv in enumerate(cevec):
    tmpf @= smoother * b.promote * cv
    smoothed_evals = smoothed_evals + g.algorithms.eigen.evals(
        w.Mpc, [tmpf], calculate_eps2=False, real=True)
g.default.pop_verbose()

# test coarse-grid deflation (re-use fine-grid evals instead of smoothing)
cdefl = inv.sequence(inv.coarse_deflate(cevec, basis, smoothed_evals), cg)

sol_cdefl = g.eval(cdefl(w.Mpc) * start)
eps2 = g.norm2(w.Mpc * sol_cdefl - start) / g.norm2(start)
niter_cdefl = len(cg.history)
g.message("Test resid/iter coarse-grid deflated cg: ", eps2, niter_cdefl)
g.message("Compare fine-grid deflated cg iter: ", niter_defl)
g.message("Compare cg iter: ", niter_cg)
Example #21
    assert eps == 0.0

# test numpy versus lattice tensor multiplication
for a_type in [
        g.ot_matrix_spin_color(4, 3),
        g.ot_vector_spin_color(4, 3),
        g.ot_matrix_spin(4),
        g.ot_vector_spin(4),
        g.ot_matrix_color(3),
        g.ot_vector_color(3),
]:
    # mtab
    for e in a_type.mtab:
        if a_type.mtab[e][1] is not None:
            b_type = g.str_to_otype(e)
            a = rng.cnormal(g.lattice(grid, a_type))
            b = rng.cnormal(g.lattice(grid, b_type))
            mul_lat = g(a * b)[0, 0, 0, 0]
            mul_np = a[0, 0, 0, 0] * b[0, 0, 0, 0]
            eps2 = g.norm2(mul_lat - mul_np) / g.norm2(mul_lat)
            g.message(f"Test {a_type.__name__} * {b_type.__name__}: {eps2}")
            if eps2 > 1e-12:
                g.message(mul_lat)
                g.message(
                    np.tensordot(a[0, 0, 0, 0].array,
                                 b[0, 0, 0, 0].array,
                                 axes=a_type.mtab[e][1]).shape)
                assert eps2 < 1e-12

    # rmtab
    for e in a_type.rmtab:
Example #22
    def jacobian(self, fields, fields_prime, src):

        nd = fields[0].grid.nd
        U = fields[0:nd]
        U_prime = fields_prime[0:nd]

        rho = get_rho(U, self.params)
        C = g.qcd.gauge.staple_sum(U, rho=rho)

        assert len(src) == nd

        dst = [g.lattice(s) for s in src]
        exp_iQ = [None] * nd
        Lambda = [None] * nd
        Sigma_prime = [None] * nd

        # (75) of https://arxiv.org/pdf/hep-lat/0311018.pdf
        for mu in range(nd):

            #
            # Sigma == g.adj(U) * gradient * 1j
            #
            Sigma_prime[mu] = g(g.adj(U_prime[mu]) * src[mu] * 1j)
            U_Sigma_prime_mu = g(U[mu] * Sigma_prime[mu])

            iQ_mu = g.qcd.gauge.project.traceless_anti_hermitian(C[mu] *
                                                                 g.adj(U[mu]))
            exp_iQ[mu], Lambda[mu] = g.matrix.exp.function_and_gradient(
                iQ_mu, U_Sigma_prime_mu)

            dst[mu] @= Sigma_prime[mu] * exp_iQ[mu] + g.adj(
                C[mu]) * 1j * Lambda[mu]

        for mu in range(nd):
            for nu in range(nd):

                if mu == nu:
                    continue

                rho_mu_nu = rho[mu, nu]
                rho_nu_mu = rho[nu, mu]

                if abs(rho_nu_mu) != 0.0 or abs(rho_mu_nu) != 0.0:
                    U_nu_x_plus_mu = g.cshift(U[nu], mu, 1)
                    U_mu_x_plus_nu = g.cshift(U[mu], nu, 1)
                    Lambda_nu_x_plus_mu = g.cshift(Lambda[nu], mu, 1)
                    Lambda_mu_x_plus_nu = g.cshift(Lambda[mu], nu, 1)

                    dst[mu] -= (1j * rho_nu_mu * U_nu_x_plus_mu *
                                g.adj(U_mu_x_plus_nu) * g.adj(U[nu]) *
                                Lambda[nu])

                    dst[mu] += (1j * rho_nu_mu * Lambda_nu_x_plus_mu *
                                U_nu_x_plus_mu * g.adj(U_mu_x_plus_nu) *
                                g.adj(U[nu]))

                    dst[mu] -= (1j * rho_mu_nu * U_nu_x_plus_mu *
                                g.adj(U_mu_x_plus_nu) * Lambda_mu_x_plus_nu *
                                g.adj(U[nu]))

                    dst[mu] += g.cshift(
                        1j * rho_nu_mu * g.adj(U_nu_x_plus_mu) * g.adj(U[mu]) *
                        Lambda[nu] * U[nu] -
                        1j * rho_mu_nu * g.adj(U_nu_x_plus_mu) * g.adj(U[mu]) *
                        Lambda[mu] * U[nu] -
                        1j * rho_nu_mu * g.adj(U_nu_x_plus_mu) *
                        Lambda_nu_x_plus_mu * g.adj(U[mu]) * U[nu],
                        nu,
                        -1,
                    )

        for mu in range(nd):
            dst[mu] @= U[mu] * dst[mu] * (-1j)
            dst[mu] @= g.qcd.gauge.project.traceless_hermitian(dst[mu])

        return dst
Example #23
    def __call__(self, link, staple, mask):
        verbose = g.default.is_verbose(
            "su2_heat_bath"
        )  # need verbosity categories [ performance, progress ]
        project_method = self.params["project_method"]

        # params
        niter = self.params["niter"]

        # temporaries
        grid = link.grid
        u2 = g.lattice(grid, g.ot_matrix_su_n_fundamental_group(2))
        u2_eye = g.identity(u2)
        one = g.identity(g.complex(grid))
        zero = g.complex(grid)
        zero[:] = 0
        eps = g.complex(grid)
        eps[:] = grid.precision.eps * 10.0
        xr = [g.complex(grid) for i in range(4)]
        a = [g.complex(grid) for i in range(4)]
        two_pi = g.complex(grid)
        two_pi[:] = 2.0 * np.pi
        accepted = g.complex(grid)
        d = g.complex(grid)
        V_eye = g.identity(link)

        # pauli
        pauli1, pauli2, pauli3 = tuple([g.lattice(u2) for i in range(3)])
        ident = g.identity(u2)
        pauli1[:] = 1j * np.array([[0, 1], [1, 0]], dtype=grid.precision.complex_dtype)
        pauli2[:] = 1j * np.array([[0, 1j], [-1j, 0]], dtype=grid.precision.complex_dtype)
        pauli3[:] = 1j * np.array([[1, 0], [0, -1]], dtype=grid.precision.complex_dtype)

        # counter
        num_sites = round(g.norm2(g.where(mask, one, zero)))

        # shortcuts
        inv = g.component.pow(-1.0)

        # go through subgroups
        for subgroup in link.otype.su2_subgroups():

            V = g.eval(link * g.adj(staple))

            # extract u2 subgroup following Kennedy/Pendleton
            link.otype.block_extract(u2, V, subgroup)
            u2 @= u2 - g.adj(u2) + g.identity(u2) * g.trace(g.adj(u2))
            udet = g.matrix.det(u2)
            adet = g.component.abs(udet)
            nzmask = adet > eps
            u2 @= g.where(nzmask, u2, u2_eye)
            udet = g.where(nzmask, udet, one)
            xi = g.eval(0.5 * g.component.sqrt(udet))
            u2 @= 0.5 * u2 * inv(xi)

            # make sure that su2 subgroup projection worked
            assert g.group.defect(u2) < u2.grid.precision.eps * 10.0

            xi @= 2.0 * xi
            alpha = g.component.real(xi)

            # main loop
            it = 0
            num_accepted = 0
            accepted[:] = 0
            d[:] = 0
            while (num_accepted < num_sites) and (it < niter):
                self.rng.uniform_real(xr, min=0.0, max=1.0)

                xr[1] @= -g.component.log(xr[1]) * inv(alpha)
                xr[2] @= -g.component.log(xr[2]) * inv(alpha)
                xr[3] @= g.component.cos(g.eval(xr[3] * two_pi))
                xr[3] @= xr[3] * xr[3]

                xrsq = g.eval(xr[2] + xr[1] * xr[3])

                d = g.where(accepted, d, xrsq)

                thresh = g.eval(one - d * 0.5)
                xrsq @= xr[0] * xr[0]

                newly_accepted = g.where(xrsq < thresh, one, zero)
                accepted = g.where(mask, g.where(newly_accepted, newly_accepted, accepted), zero)

                num_accepted = round(g.norm2(g.where(accepted, one, zero)))

                it += 1

            if verbose:
                g.message(f"SU(2)-heatbath update needed {it} / {niter} iterations")

            # update link
            a[0] @= g.where(mask, one - d, zero)

            a123mag = g.component.sqrt(g.component.abs(one - a[0] * a[0]))

            phi, cos_theta = g.complex(grid), g.complex(grid)
            self.rng.uniform_real([phi, cos_theta])
            phi @= phi * two_pi
            cos_theta @= (cos_theta * 2.0) - one
            sin_theta = g.component.sqrt(g.component.abs(one - cos_theta * cos_theta))

            a[1] @= a123mag * sin_theta * g.component.cos(phi)
            a[2] @= a123mag * sin_theta * g.component.sin(phi)
            a[3] @= a123mag * cos_theta

            ua = g.eval(a[0] * ident + a[1] * pauli1 + a[2] * pauli2 + a[3] * pauli3)

            b = g.where(mask, g.adj(u2) * ua, ident)
            link.otype.block_insert(V, b, subgroup)

            link @= g.where(accepted, V * link, link)

            # check
            check = g.where(accepted, ua * g.adj(ua) - ident, 0.0 * ident)
            delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
            assert delta < grid.precision.eps * 10.0

            check = g.where(accepted, b * g.adj(b) - ident, 0.0 * ident)
            delta = (g.norm2(check) / g.norm2(ident)) ** 0.5
            assert delta < grid.precision.eps * 10.0

            check = g.where(accepted, V * g.adj(V) - V_eye, 0.0 * V_eye)
            delta = (g.norm2(check) / g.norm2(V_eye)) ** 0.5
            assert delta < grid.precision.eps * 10.0

        # project
        g.project(link, project_method)
Example #24
#
# Authors: Christoph Lehner 2020
#
import gpt as g
import numpy as np

grid = g.grid([8, 4, 4, 4], g.double)
rng = g.random("test")
dims = ["X", "Y", "Z", "T"]

for lat in [g.mspincolor, g.mspin]:
    l = lat(grid)
    g.message(lat.__name__)
    rng.cnormal(l)

    dst = g.lattice(l)
    ref = g.lattice(l)

    assert g.norm2(l) > 1e-7

    for i, d1 in enumerate(dims):
        for j, d2 in enumerate(dims):
            if i < j:
                dst @= g.gamma[d1] * g.gamma[d2] * l
                dst -= g.gamma[d2] * g.gamma[d1] * l
                dst *= 1 / 2.0
                ref @= g.gamma["Sigma%s%s" % (d1, d2)] * l
                eps = g.norm2(dst - ref) / g.norm2(l)
                g.message("Test Sigma%s%s: " % (d1, d2), eps)
                assert eps == 0.0
Example #25
    def read_lattice(self, a):
        g_desc = a[0]
        cv_desc = a[1]
        l_desc = a[2]
        filepos = [int(x) for x in a[3:]]

        # first find grid
        if g_desc not in self.params["grids"]:
            self.params["grids"][g_desc] = gpt.grid(g_desc)
        g = self.params["grids"][g_desc]

        # create a cartesian view and lattice to load
        l = gpt.lattice(g, l_desc)
        cv0 = gpt.cartesian_view(-1, cv_desc, g.fdimensions, g.cb,
                                 l.checkerboard())

        # find tasks for my node
        views_for_node = self.views_for_node(cv0, g)

        # performance
        dt_distr, dt_crc, dt_read, dt_misc = 0.0, 0.0, 0.0, 0.0
        szGB = 0.0
        g.barrier()
        t0 = gpt.time()

        # need to load all views
        for xk, iview in enumerate(views_for_node):
            g.barrier()
            dt_read -= gpt.time()

            f, pos = self.open_view(xk, iview, False, cv_desc, g.fdimensions,
                                    g.cb, l.checkerboard())

            if f is not None:
                f.seek(filepos[iview], 0)
                ntag = int.from_bytes(f.read(4), byteorder='little')
                f.read(ntag)  # not needed if index is present
                crc_exp = int.from_bytes(f.read(4), byteorder='little')
                nd = int.from_bytes(f.read(4), byteorder='little')
                f.read(8 * nd)  # not needed if index is present
                sz = int.from_bytes(f.read(8), byteorder='little')
                data = memoryview(f.read(sz))
                dt_crc -= gpt.time()
                crc_comp = gpt.crc32(data)
                dt_crc += gpt.time()
                assert crc_comp == crc_exp
                sys.stdout.flush()
                szGB += len(data) / 1024.0**3.0
            else:
                assert len(pos) == 0
                data = None

            g.barrier()
            dt_read += gpt.time()
            dt_distr -= gpt.time()
            l[pos] = data
            g.barrier()
            dt_distr += gpt.time()

        g.barrier()
        t1 = gpt.time()

        szGB = g.globalsum(szGB)
        if self.verbose and dt_crc != 0.0:
            gpt.message(
                "Read %g GB at %g GB/s (%g GB/s for distribution, %g GB/s for reading + checksum, %g GB/s for checksum, %d views per node)"
                % (szGB, szGB / (t1 - t0), szGB / dt_distr, szGB / dt_read,
                   szGB / dt_crc, len(views_for_node)))

        # TODO:
        # split grid exposure, allow cgpt_distribute to be given a communicator
        # and take it in importexport.h, add debug info here
        # more benchmarks, useful to create a plan for cgpt_distribute and cache? immutable numpy array returned from coordinates, attach plan

        return l
Example #26
    def lattice(self, otype):
        return gpt.lattice(self.local_grid, otype)
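
This is the accessor used in Example #3 above to allocate a field on the sparse sub-domain:

# as in Example #3: allocate a sparse-domain field and project the full field into it
S = sdomain.lattice(U[0].otype)
sdomain.project(S, U[0])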
Example #27
    def __init__(self, op, parity):
        self.op = op
        self.otype = op.otype[0]
        self.parity = gpt.odd if parity is None else parity
        self.F_grid_eo = op.F_grid_eo
        self.F_grid = op.F_grid
        self.U_grid = op.U_grid
        self.tmp = gpt.lattice(self.F_grid_eo, self.otype)
        self.tmp2 = gpt.lattice(self.F_grid_eo,
                                self.otype)  # need for nested call in R
        self.in_p = gpt.lattice(self.F_grid_eo, self.otype)
        self.in_np = gpt.lattice(self.F_grid_eo, self.otype)
        self.out_p = gpt.lattice(self.F_grid_eo, self.otype)
        self.out_np = gpt.lattice(self.F_grid_eo, self.otype)
        self.ImportPhysicalFermionSource = self.op.ImportPhysicalFermionSource
        self.ExportPhysicalFermionSolution = self.op.ExportPhysicalFermionSolution
        self.Dminus = self.op.Dminus
        self.ExportPhysicalFermionSource = self.op.ExportPhysicalFermionSource

        def _N(op, ip):
            self.op.Meooe.mat(self.tmp2, ip)
            self.op.Mooee.inv_mat(op, self.tmp2)
            self.op.Meooe.mat(self.tmp2, op)
            self.op.Mooee.inv_mat(op, self.tmp2)
            op @= ip - op

        def _NDag(op, ip):
            self.op.Mooee.adj_inv_mat(self.tmp2, ip)
            self.op.Meooe.adj_mat(op, self.tmp2)
            self.op.Mooee.adj_inv_mat(self.tmp2, op)
            self.op.Meooe.adj_mat(op, self.tmp2)
            op @= ip - op

        def _NDagN(op, ip):
            _N(self.tmp, ip)
            _NDag(op, self.tmp)

        def _L(o, ip):
            self.out_p @= ip
            self.op.Meooe.mat(self.tmp, ip)
            self.op.Mooee.inv_mat(self.out_np, self.tmp)
            self.out_np @= -self.out_np
            self.export_parity(o)

        def _L_pseudo_inverse(op, i):
            self.import_parity(i)
            op @= self.in_p

        def _S(o, i):
            self.import_parity(i)
            self.op.Mooee.inv_mat(self.out_np, self.in_np)
            self.out_p[:] = 0
            self.out_p.checkerboard(self.parity)
            self.export_parity(o)

        self.L = gpt.matrix_operator(
            mat=_L,
            inv_mat=_L_pseudo_inverse,
            otype=op.otype,
            grid=(self.F_grid, self.F_grid_eo),
            cb=(None, self.parity),
        )

        self.S = gpt.matrix_operator(
            mat=_S,
            otype=op.otype,
            grid=self.F_grid,
        )

        self.N = gpt.matrix_operator(mat=_N,
                                     adj_mat=_NDag,
                                     otype=op.otype,
                                     grid=self.F_grid_eo,
                                     cb=self.parity)

        self.NDagN = gpt.matrix_operator(
            mat=_NDagN,
            adj_mat=_NDagN,
            otype=op.otype,
            grid=self.F_grid_eo,
            cb=self.parity,
        )

        for undressed in ["N", "NDagN"]:
            self.__dict__[undressed].split = lambda mpi: eo1_base(
                op.split(mpi), parity).__dict__[undressed]
Example #28
def cartesian(field):
    if isinstance(field, list):
        return [cartesian(f) for f in field]
    return g.lattice(field.grid, field.otype.cartesian()).checkerboard(field.checkerboard())
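
A minimal usage sketch (assuming the helper above is in scope): for SU(3) gauge links, otype.cartesian() gives the corresponding algebra type, so this allocates one algebra-valued field per link direction, with matching grid and checkerboard.

import gpt as g

rng = g.random("example")
U = g.qcd.gauge.random(g.grid([8, 8, 8, 16], g.double), rng)
A = cartesian(U)  # list: one cartesian (algebra-valued) counterpart per entry of U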
Example #29
    def __call__(self, mat, src, psi, prec=None):
        # verbosity
        self.verbose = g.default.is_verbose("fgmres")
        checkres = True  # for now

        # total time
        tt0 = time()

        # parameters
        rlen = self.restartlen

        # tensors
        dtype = np.complex128
        H = np.zeros((rlen + 1, rlen), dtype)
        c = np.zeros((rlen + 1), dtype)
        s = np.zeros((rlen + 1), dtype)
        y = np.zeros((rlen + 1), dtype)
        gamma = np.zeros((rlen + 1), dtype)

        # fields
        mmpsi, r = g.copy(src), g.copy(src)
        V = [g.lattice(src) for i in range(rlen + 1)]
        if prec is not None:  # with a preconditioner, extra Z vectors are needed (flexible GMRES)
            Z = [g.lattice(src) for i in range(rlen + 1)]

        # residual
        ssq = g.norm2(src)
        rsq = self.eps**2. * ssq

        # initial values
        r2 = self.restart(mat, psi, mmpsi, src, r, V, gamma)

        for k in range(self.maxiter):
            # iteration within current krylov space
            i = k % rlen

            # iteration criteria
            reached_maxiter = (k + 1 == self.maxiter)
            need_restart = (i + 1 == rlen)

            t0 = time()
            if prec is not None:
                prec(V[i], Z[i])
            t1 = time()

            t2 = time()
            if prec is not None:
                mat(Z[i], V[i + 1])
            else:
                mat(V[i], V[i + 1])
            t3 = time()

            t4 = time()
            g.orthogonalize(V[i + 1], V[0:i + 1], H[:, i])
            t5 = time()

            t6 = time()
            H[i + 1, i] = g.norm2(V[i + 1])**0.5

            if H[i + 1, i] == 0.:
                g.message("fgmres breakdown, H[%d, %d] = 0" % (i + 1, i))
                break

            V[i + 1] /= H[i + 1, i]
            t7 = time()

            t8 = time()
            self.qr_update(s, c, H, gamma, i)
            r2 = np.absolute(gamma[i + 1])**2
            t9 = time()

            if self.verbose:
                g.message(
                    "Timing[s]: Prec = %g, Matrix = %g, Orthog = %g, RestArnoldi = %g, QR = %g"
                    % (t1 - t0, t3 - t2, t5 - t4, t7 - t6, t9 - t8))
                g.message("res^2[ %d, %d ] = %g" % (k, i, r2))

            if r2 <= rsq or need_restart or reached_maxiter:
                if prec is not None:
                    self.update_psi(psi, gamma, H, y, Z, i)
                else:
                    self.update_psi(psi, gamma, H, y, V, i)

                if r2 <= rsq:
                    if self.verbose:
                        tt1 = time()
                        g.message("Converged in %g s" % (tt1 - tt0))
                    if checkres:
                        res = self.calc_res(mat, psi, mmpsi, src, r) / ssq
                        g.message(
                            "Computed res = %g, true res = %g, target = %g" %
                            (r2**0.5, res**0.5, self.eps))
                    break

                if reached_maxiter:
                    if self.verbose:
                        tt1 = time()
                        g.message("Did NOT converge in %g s" % (tt1 - tt0))
                        if checkres:
                            res = self.calc_res(mat, psi, mmpsi, src, r) / ssq
                            g.message(
                                "Computed res = %g, true res = %g, target = %g"
                                % (r2**0.5, res**0.5, self.eps))

                if need_restart:
                    r2 = self.restart(mat, psi, mmpsi, src, r, V, gamma)
                    if self.verbose:
                        g.message("Performed restart")
Example #30
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020
#
# Desc.: Illustrate core concepts and features
#
import gpt as g
import numpy as np

# load configuration
rng = g.random("test")
U = g.qcd.gauge.random(g.grid([8, 8, 8, 16], g.double), rng)
V = rng.lie(g.lattice(U[0]))
U_transformed = g.qcd.gauge.transformed(U, V)

# Test gauge invariance of plaquette
P = g.qcd.gauge.plaquette(U)
P_transformed = g.qcd.gauge.plaquette(U_transformed)
eps = abs(P - P_transformed)
g.message(
    f"Plaquette before {P} and after {P_transformed} gauge transformation: {eps}"
)
assert eps < 1e-13

# Test gauge covariance of staple
rho = np.array([[0.0 if i == j else 0.1 for i in range(4)] for j in range(4)],
               dtype=np.float64)
C = g.qcd.gauge.smear.staple_sum(U, rho=rho)
C_transformed = g.qcd.gauge.smear.staple_sum(U_transformed, rho=rho)
for mu in range(len(C)):
    q = g.sum(g.trace(C[mu] * g.adj(U[mu]))) / U[0].grid.gsites