Example #1
File: core.py Project: waterret/gpt
def correlate_test_4d(a, b, x):
    # c[x] = (1/vol) sum_y a[y]*b[y+x]
    bprime = b
    L = a.grid.gdimensions
    vol = L[0] * L[1] * L[2] * L[3]
    for i in range(4):
        # see core test: dst = g.cshift(src, 0, 1) -> dst[x] = src[x+1]
        bprime = g.cshift(bprime, i, x[i])  # bprime[y] = b[y+x]
    return g.sum(a * bprime) / vol
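
A minimal usage sketch (not part of the original file; grid size and field setup are assumed) showing how correlate_test_4d might be called:

import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
rng = g.random("seed")
a = rng.cnormal(g.complex(grid))  # random complex scalar fields
b = rng.cnormal(g.complex(grid))
# correlator at displacement x = (1, 0, 0, 0)
g.message(correlate_test_4d(a, b, [1, 0, 0, 0]))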
Example #2
def polyakov_loop(U, mu):
    # tr[ prod_j U_{\mu}(m, j) ]
    vol = float(U[0].grid.fsites)
    Nc = U[0].otype.Nc
    tmp_polyakov_loop = g.copy(U[mu])
    for n in range(1, U[0].grid.fdimensions[mu]):
        tmp = g.cshift(tmp_polyakov_loop, mu, 1)
        tmp_polyakov_loop = g.eval(U[mu] * tmp)

    return g.sum(g.trace(tmp_polyakov_loop)) / Nc / vol
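
Hedged check (setup assumed, not from the original project): with unit links the ordered product along the time direction is the identity, so the normalized loop should evaluate to 1.0.

import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
U = g.qcd.gauge.unit(grid)      # unit SU(3) links
g.message(polyakov_loop(U, 3))  # mu = 3 (time direction), expect 1.0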
Example #3
def plaquette(U):
    # U[mu](x)*U[nu](x+mu)*adj(U[mu](x+nu))*adj(U[nu](x))
    tr = 0.0
    vol = float(U[0].grid.gsites)
    for mu in range(4):
        for nu in range(mu):
            tr += g.sum(
                g.trace(U[mu] * g.cshift(U[nu], mu, 1) *
                        g.adj(g.cshift(U[mu], nu, 1)) * g.adj(U[nu])))
    return 2. * tr.real / vol / 4. / 3. / 3.
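
A hedged cross-check (setup assumed; g.qcd.gauge.random is taken from the library's test utilities): the hand-rolled average should agree with the built-in g.qcd.gauge.plaquette, as Example #11 below also demonstrates.

import gpt as g

rng = g.random("seed")
grid = g.grid([8, 8, 8, 16], g.double)
U = g.qcd.gauge.random(grid, rng)
g.message(plaquette(U), g.qcd.gauge.plaquette(U))  # the two values should agree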
Example #4
def project_onto_suN(dest, u_unprojected, params):
    t_total = -gpt.time()
    t_trace, t_projectstep = 0.0, 0.0

    vol = dest.grid.fsites
    t_trace -= gpt.time()
    old_trace = gpt.sum(gpt.trace(dest * u_unprojected)).real / (vol * 3)
    t_trace += gpt.time()

    for iteration in range(params["max_iteration"]):
        # perform a single projection step
        t_projectstep -= gpt.time()
        project_to_suN_step(dest, u_unprojected)
        t_projectstep += gpt.time()

        # calculate new trace
        t_trace -= gpt.time()
        new_trace = gpt.sum(gpt.trace(dest * u_unprojected)).real / (vol * 3)
        t_trace += gpt.time()

        epsilon = np.abs((new_trace - old_trace) / old_trace)
        gpt.message(f"APE iter {_}, epsilon: {epsilon}")

        if epsilon < params["accuracy"]:
            break
        old_trace = new_trace
    else:
        raise RuntimeError("Projection to SU(3) did not converge.")

    t_total += gpt.time()

    if gpt.default.is_verbose("project_onto_suN"):
        t_profiled = t_trace + t_projectstep
        t_unprofiled = t_total - t_profiled

        gpt.message("project_onto_suN: total", t_total, "s")
        gpt.message("project_onto_suN: t_trace", t_trace, "s",
                    round(100 * t_trace / t_total, 1), "%")
        gpt.message("project_onto_suN: t_projectstep", t_projectstep, "s",
                    round(100 * t_projectstep / t_total, 1), "%")
        gpt.message("project_onto_suN: unprofiled", t_unprofiled, "s",
                    round(100 * t_unprofiled / t_total, 1), "%")
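
A hedged sketch of the parameter dictionary the routine above expects; the concrete values are illustrative only.

params = {
    "max_iteration": 100,  # raise RuntimeError if not converged by then
    "accuracy": 1e-12,     # relative trace change at which the iteration stops
}
# project_onto_suN(dest, u_unprojected, params)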
Example #5
import gpt as g
from itertools import permutations


def Udelta_average(U):
    r"""
    compute < tr Udelta * Udelta^\dagger >
    """
    Volume = float(U[0].grid.fsites)
    Udelta = g.lattice(U[0].grid, U[0].otype)
    Udelta[:] = 0.0
    for [i, j, k] in permutations([0, 1, 2]):
        Udelta += U[i] * g.cshift(U[j], i, 1) * g.cshift(
            g.cshift(U[k], i, 1), j, 1)
    return g.sum(g.trace(Udelta * g.adj(Udelta))).real / Volume / 36.0
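
Hedged check (setup assumed): with unit links each of the six permutation terms is the identity, so Udelta = 6*1 per site, tr(Udelta * adj(Udelta)) = 108, and the average above evaluates to 108 / 36 = 3.0.

grid = g.grid([8, 8, 8, 16], g.double)
U = g.qcd.gauge.unit(grid)
g.message(Udelta_average(U))  # expect 3.0 on a unit configuration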
Example #6
def plaquette(U):
    # U[mu](x)*U[nu](x+mu)*adj(U[mu](x+nu))*adj(U[nu](x))
    tr = 0.0
    vol = float(U[0].grid.fsites)
    Nd = len(U)
    ndim = U[0].otype.shape[0]
    for mu in range(Nd):
        for nu in range(mu):
            tr += g.sum(
                g.trace(U[mu] * g.cshift(U[nu], mu, 1) *
                        g.adj(g.cshift(U[mu], nu, 1)) * g.adj(U[nu])))
    return 2.0 * tr.real / vol / Nd / (Nd - 1) / ndim
Example #7
 def __call__(self, V):
     V = g.util.from_list(V)
     return sum(
         [g.sum(g.trace(u))
          for u in g.qcd.gauge.transformed(self.U, V)]).real * (-2.0)
Example #8
def measure(phi):
    return [g.sum(phi).real / grid.fsites, g.norm2(phi) / grid.fsites]
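
Hedged usage sketch (grid and rng are assumed to be defined in the surrounding script, as in the other examples): the first entry is the volume-averaged field value, the second the average norm per site.

phi = rng.normal(g.real(grid))
g.message(measure(phi))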
Example #9
def measure(x):
    return [g.sum(x).real / grid.fsites, g.norm2(x) / grid.fsites]
Example #10
def rectangle(U,
              first,
              second=None,
              third=None,
              cache=default_rectangle_cache):
    #
    # Calling conventions:
    #
    # rectangle(U, 2, 1)
    # rectangle(U, 2, 1, 3)  # third argument is min_mu: restricts mu to the temporal direction, so L_mu is the temporal extent, averaged over the spatial directions nu with extent L_nu
    # rectangle(U, [(1,1), (2,1)])
    #
    # or specify explicit mu,L_mu,nu,L_nu configurations
    # rectangle(U, [ [ (0,1,3,2), (1,2,3,2) ] ])
    #
    if second is not None:
        L_mu = first
        L_nu = second
        min_mu = third if third is not None else 0
        configurations = [[(mu, L_mu, nu, L_nu)
                           for mu in range(min_mu, len(U))
                           for nu in range(mu)]]
    else:
        configurations = []
        for f in first:
            if type(f) is tuple:
                L_mu = f[0]
                L_nu = f[1]
                min_mu = f[2] if len(f) == 3 else 0
                configurations.append([(mu, L_mu, nu, L_nu)
                                       for mu in range(min_mu, len(U))
                                       for nu in range(mu)])
            else:
                configurations.append(f)

    cache_key = str(configurations)
    if cache_key not in cache:
        paths = []
        elements = []
        for configuration in configurations:
            c_paths = [
                g.qcd.gauge.path().f(mu, L_mu).f(nu, L_nu).b(mu,
                                                             L_mu).b(nu, L_nu)
                for mu, L_mu, nu, L_nu in configuration
            ]
            elements.append(len(c_paths))
            paths = paths + c_paths
        cache[cache_key] = (g.qcd.gauge.transport(U, paths), elements)

    transport = cache[cache_key][0]
    ranges = cache[cache_key][1]

    loops = transport(U)

    vol = float(U[0].grid.fsites)
    ndim = U[0].otype.shape[0]

    value = 0.0
    idx = 0
    ridx = 0
    results = []
    for p in loops:
        value += g.sum(g.trace(p))
        idx += 1
        if idx == ranges[ridx]:
            results.append(value.real / vol / idx / ndim)
            idx = 0
            ridx = ridx + 1
            value = 0.0
    if len(results) == 1:
        return results[0]
    return results
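
Hedged usage sketch of the calling conventions documented in the comment block above (gauge field setup assumed; default_rectangle_cache and the g.qcd.gauge helpers come from the surrounding module):

import gpt as g

grid = g.grid([8, 8, 8, 16], g.double)
U = g.qcd.gauge.unit(grid)
R_2x1 = rectangle(U, 2, 1)               # single value, averaged over mu > nu
R_list = rectangle(U, [(1, 1), (2, 1)])  # one value per (L_mu, L_nu) pair
R_expl = rectangle(U, [[(0, 1, 3, 2)]])  # explicit (mu, L_mu, nu, L_nu) loops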
Example #11
# Calculate Plaquette
g.message(g.qcd.gauge.plaquette(U))
g.message(plaquette(U))

# Precision change
Uf = g.convert(U, g.single)
g.message(g.qcd.gauge.plaquette(Uf))

Uf0 = g.convert(U[0], g.single)
g.message(g.norm2(Uf0))

del Uf0
g.meminfo()

# Sum over all sites of the gauge link field
x = g.sum(Uf[0])

print(x)

grid = g.grid([4, 4, 4, 4], g.single)
gr = g.complex(grid)

gr[0, 0, 0, 0] = 2
gr[1, 0, 0, 0] = 3

gride = g.grid([4, 4, 4, 4], g.single, g.redblack)
gre = g.complex(gride)
g.pick_cb(g.even, gre, gr)
gre[2, 0, 0, 0] = 4
g.set_cb(gr, gre)
g.meminfo()
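
Hedged follow-up (not in the original script): after g.set_cb the value written on the even checkerboard should be visible on the full grid again.

g.message(gr[2, 0, 0, 0])  # expect 4, since site (2,0,0,0) has even parity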
Example #12
                ) for x in range(L[dimension])
            ])
            assert np.allclose(full_sliced[n],
                               sliced_numpy,
                               atol=0.0,
                               rtol=1e-12)

################################################################################
# Test FFT
################################################################################
fft_l_sp = g.eval(g.fft() * l_sp)
eps = g.norm2(g.adj(g.fft()) * fft_l_sp - l_sp) / g.norm2(l_sp)
g.message("FFTinv * FFT:", eps)
assert eps < 1e-12

eps = g.norm2(g.sum(exp_ixp * l_sp) / np.prod(L) - fft_l_sp[1, 2, 3, 4])
g.message("FFT forward test:", eps)
assert eps < 1e-12

fft_mom_A = g.slice(
    g.exp_ixp(2.0 * np.pi * np.array([1, 2, 3, 0]) / L) * l_sp, 3) / np.prod(
        L[0:3])
fft_mom_B = [
    g.vcolor(x) for x in g.eval(g.fft([0, 1, 2]) * l_sp)[1, 2, 3, 0:L[3]]
]
for t in range(L[3]):
    eps = g.norm2(fft_mom_A[t] - fft_mom_B[t])
    assert eps < 1e-12


################################################################################
Example #13
 def inner_product(self, left, right):
     return gpt.sum(gpt.trace(gpt.adj(left) * right)).real
Example #14
        assert eps < 1e-13


# outer product of vcomplex
n = 12
cm = g.mcomplex(grid, n)
cl = rng.normal(g.vcomplex(grid, n))
cr = rng.normal(g.vcomplex(grid, n))
cm @= cl * g.adj(cr)
for i in range(n):
    for j in range(n):
        eps = np.linalg.norm(cm[0, 0, 0, 0, i, j] - cl[0, 0, 0, 0, i] * cr[0, 0, 0, 0, j].conj())
        assert eps < 1e-13

# test g.sum
s1 = g.sum(cm).array
s2 = np.sum(cm[:, :, :, :], axis=0)
cm.grid.globalsum(s2)
eps = np.linalg.norm(s2 - s1) ** 2.0 / grid.gsites / (n * n)
assert eps < 1e-10

s1 = g.sum(cv).array
s2 = np.sum(cv[:, :, :, :], axis=0)
cm.grid.globalsum(s2)
eps = np.linalg.norm(s2 - s1) ** 2.0 / grid.gsites / (n)
assert eps < 1e-10

# once inner product is implemented, test:
# cs = g.real(grid)
# cs @= g.adj(cl) * cr
# eps = abs(g.inner_product(cl, cr) - g.sum(cs))
Example #15
g.message(
    f"Plaquette before {P} and after {P_transformed} gauge transformation: {eps}"
)
assert eps < 1e-13

# Test gauge invariance of R_2x1
R_2x1_transformed = g.qcd.gauge.rectangle(U_transformed, 2, 1)
eps = abs(R_2x1 - R_2x1_transformed)
g.message(
    f"R_2x1 before {R_2x1} and after {R_2x1_transformed} gauge transformation: {eps}"
)
assert eps < 1e-13

# Test field version
R_2x1_field = g(
    g.sum(g.qcd.gauge.rectangle(U, 2, 1, field=True)) / U[0].grid.gsites)
eps = abs(R_2x1 - R_2x1_field)
g.message(f"R_2x1 field check: {eps}")
assert eps < 1e-13

# Test gauge covariance of staple
rho = np.array([[0.0 if i == j else 0.1 for i in range(4)] for j in range(4)],
               dtype=np.float64)
C = g.qcd.gauge.staple_sum(U, rho=rho)
C_transformed = g.qcd.gauge.staple_sum(U_transformed, rho=rho)
for mu in range(len(C)):
    q = g.sum(g.trace(C[mu] * g.adj(U[mu]))) / U[0].grid.gsites
    q_transformed = (
        g.sum(g.trace(C_transformed[mu] * g.adj(U_transformed[mu]))) /
        U[0].grid.gsites)
Example #16
)
assert eps < 1e-13

# Without trace and real projection
R_2x1_notp = g.qcd.gauge.rectangle(U_transformed,
                                   2,
                                   1,
                                   trace=False,
                                   real=False)
eps = abs(g.trace(R_2x1_notp).real - R_2x1)
g.message(f"R_2x1 no real and trace check: {eps}")
assert eps < 1e-13

# Test field version
R_2x1_field = g(
    g.sum(g.qcd.gauge.rectangle(U, 2, 1, field=True)) / U[0].grid.gsites)
eps = abs(R_2x1 - R_2x1_field)
g.message(f"R_2x1 field check: {eps}")
assert eps < 1e-13

# Without trace and real projection and field
R_2x1_notp = g.qcd.gauge.rectangle(U_transformed,
                                   2,
                                   1,
                                   trace=False,
                                   real=False,
                                   field=True)
eps = abs(g(g.sum(g.trace(R_2x1_notp))).real / U[0].grid.gsites - R_2x1)
g.message(f"R_2x1 field, no real and trace check: {eps}")
assert eps < 1e-13
Example #17
File: su_n.py Project: spieseba/gpt
 def inner_product(self, left, right):
     if self.trace_norm is None:
         gen = self.generators(left.grid.precision.complex_dtype)
         self.trace_norm = numpy.trace(gen[0].array @ gen[0].array)
     return (gpt.sum(gpt(gpt.trace(left * right))) / self.trace_norm).real
Example #18
assert eps < 1e-20

eps = g.norm2(g.adj(exp_ixp * exp_ixp) * exp_ixp * exp_ixp * l_dp -
              l_dp) / g.norm2(l_dp)
g.message("Momentum adj test (2): ", eps)
assert eps < 1e-20

################################################################################
# Test FFT
################################################################################
fft_l_sp = g.eval(g.fft() * l_sp)
eps = g.norm2(g.adj(g.fft()) * fft_l_sp - l_sp) / g.norm2(l_sp)
g.message("FFTinv * FFT:", eps)
assert eps < 1e-12

eps = g.norm2(g.sum(exp_ixp * l_sp) / np.prod(L) - fft_l_sp[1, 2, 3, 4])
g.message("FFT forward test:", eps)
assert eps < 1e-12

fft_mom_A = g.slice(
    g.exp_ixp(2.0 * np.pi * np.array([1, 2, 3, 0]) / L) * l_sp, 3) / np.prod(
        L[0:3])
fft_mom_B = [
    g.vcolor(x) for x in g.eval(g.fft([0, 1, 2]) * l_sp)[1, 2, 3, 0:L[3]]
]
for t in range(L[3]):
    eps = g.norm2(fft_mom_A[t] - fft_mom_B[t])
    assert eps < 1e-12

################################################################################
# Test vcomplex
Example #19
File: loops.py Project: lehner/gpt
 def __iadd__(self, v):
     v = g(g.trace(v))
     self.value += g.sum(v) / v.grid.gsites
     return self
Example #20
# compute conserved current divergence
div = g.mspin(grid)
div[:] = 0

for mu in range(4):
    tmp = qm.conserved_vector_current(dst_qm_bulk, src, dst_qm_bulk, src, mu)
    tmp -= g.cshift(tmp, mu, -1)
    div += g.color_trace(tmp)

div = g(g.trace(g.adj(div) * div))

g.message("div(conserved_current) contact term", div[0, 1, 0, 0].real)

div[0, 1, 0, 0] = 0

eps = g.sum(div).real
g.message(f"div(conserved_current) = {eps} without contact term")
assert eps < 1e-11

# compute partially conserved axial current divergence (zero momentum projected)
AP = g.slice(
    g.trace(
        qm.conserved_axial_current(dst_qm_bulk, src, dst_qm_bulk, src, 3) *
        g.gamma[5]), 3)
PP = g.slice(g.trace(dst_qm * g.adj(dst_qm)), 3)

Nt = grid.gdimensions[3]
for t in range(Nt):
    dAP_t = AP[t] - AP[(t - 1 + Nt) % Nt]
    mass_term = (PP[t] * 0.08 + J5q[t]) * 2.0
    eps = abs(dAP_t - mass_term) / abs(dAP_t + mass_term)
Example #21
    def read_lattice(self):
        # define grid from header
        g = gpt.grid(self.fdimensions, self.precision)
        # create lattice
        l = [gpt.lattice(g, self.otype) for i in range(self.nfields)]

        # performance
        dt_distr, dt_cs, dt_read, dt_misc = 0.0, 0.0, 0.0, 0.0
        szGB = 0.0
        g.barrier()
        t0 = gpt.time()

        dt_read -= gpt.time()

        pos, nreader = distribute_cartesian_file(self.fdimensions, g,
                                                 l[0].checkerboard())

        if len(pos) > 0:
            sz = self.bytes_per_site * len(pos)
            f = gpt.FILE(self.path, "rb")
            f.seek(self.bytes_header + g.processor * sz, 0)
            data = memoryview(f.read(sz))
            f.close()

            dt_misc -= gpt.time()
            data = self.munge(data)
            dt_misc += gpt.time()

            dt_cs -= gpt.time()
            cs_comp = cgpt.util_nersc_checksum(data, 0)
            dt_cs += gpt.time()

            dt_misc -= gpt.time()
            data = self.reconstruct(data)

            assert len(data) % 8 == 0
            data_munged = cgpt.mview(
                cgpt.ndarray([len(data) // 8], numpy.float64))
            cgpt.munge_inner_outer(data_munged, data, self.nfields, len(pos))
            data = data_munged
            dt_misc += gpt.time()

            szGB += len(data) / 1024.0**3.0
        else:
            data = memoryview(bytearray())
            cs_comp = 0

        cs_comp = g.globalsum(cs_comp) & 0xFFFFFFFF
        cs_exp = int(self.metadata["CHECKSUM"].upper(), 16)
        if cs_comp != cs_exp:
            gpt.message(f"cs_comp={cs_comp:X} cs_exp={cs_exp:X}")
            assert False

        dt_read += gpt.time()

        # distributes data accordingly
        g.barrier()
        dt_distr -= gpt.time()
        cache = {}
        lblock = len(data) // self.nfields
        for i in range(self.nfields):
            l[i][pos, cache] = data[lblock * i:lblock * (i + 1)]
        g.barrier()
        dt_distr += gpt.time()

        g.barrier()
        t1 = gpt.time()

        szGB = g.globalsum(szGB)
        if self.verbose and dt_cs != 0.0:
            gpt.message(
                "Read %g GB at %g GB/s (%g GB/s for distribution, %g GB/s for munged read, %g GB/s for checksum, %g GB/s for munging, %d readers)"
                % (
                    szGB,
                    szGB / (t1 - t0),
                    szGB / dt_distr,
                    szGB / dt_read,
                    szGB / dt_cs,
                    szGB / dt_misc,
                    nreader,
                ))

        # also check plaquette and link trace
        P_comp = gpt.qcd.gauge.plaquette(l)
        P_exp = float(self.metadata["PLAQUETTE"])
        P_digits = len(self.metadata["PLAQUETTE"].split(".")[1])
        P_eps = abs(P_comp - P_exp)
        P_eps_threshold = 10.0**(-P_digits + 2)
        P_eps_threshold = max([1e2 * self.precision.eps, P_eps_threshold])
        assert P_eps < P_eps_threshold

        L_comp = (sum([
            gpt.sum(gpt.trace(x)) / x.grid.gsites / x.otype.shape[0] for x in l
        ]).real / self.nfields)
        L_exp = float(self.metadata["LINK_TRACE"])
        L_digits = len(
            self.metadata["LINK_TRACE"].split(".")[1].lower().split("e")[0])
        L_eps_threshold = 10.0**(-L_digits + 2)
        L_eps_threshold = max([1e2 * self.precision.eps, L_eps_threshold])
        L_eps = abs(L_comp - L_exp)
        assert L_eps < L_eps_threshold

        return l
Example #22
File: gauge.py Project: wettig/gpt
# Test gauge invariance of R_2x1
R_2x1_transformed = g.qcd.gauge.rectangle(U_transformed, 2, 1)
eps = abs(R_2x1 - R_2x1_transformed)
g.message(
    f"R_2x1 before {R_2x1} and after {R_2x1_transformed} gauge transformation: {eps}"
)
assert eps < 1e-13

# Test gauge covariance of staple
rho = np.array([[0.0 if i == j else 0.1 for i in range(4)] for j in range(4)],
               dtype=np.float64)
C = g.qcd.gauge.staple_sum(U, rho=rho)
C_transformed = g.qcd.gauge.staple_sum(U_transformed, rho=rho)
for mu in range(len(C)):
    q = g.sum(g.trace(C[mu] * g.adj(U[mu]))) / U[0].grid.gsites
    q_transformed = (
        g.sum(g.trace(C_transformed[mu] * g.adj(U_transformed[mu]))) /
        U[0].grid.gsites)

    eps = abs(q - q_transformed)
    g.message(
        f"Staple q[{mu}] before {q} and after {q_transformed} gauge transformation: {eps}"
    )
    assert eps < 1e-14

# Test stout smearing
U_stout = U
P_stout = []
for i in range(3):
    U_stout = g.qcd.gauge.smear.stout(U_stout, rho=0.1)
Example #23
File: create.py Project: wettig/gpt
#
import gpt as g
import numpy as np

# test sources
rng = g.random("test")
L = [8, 8, 8, 16]
grid = g.grid(L, g.double)
c = g.create

src = g.mspincolor(grid)
c.wall.z2(src, 1, rng)
g.message("Test Z2 wall")

# simple test of correct norm
x_val = g.sum(g.trace(src * src))
x_exp = L[0] * L[1] * L[2] * 12
eps = abs(x_val - x_exp) / abs(x_val)
g.message(f"Norm test: {eps}")
assert eps < 1e-13

# test wall
test1 = rng.cnormal(g.mspincolor(grid), mu=1.0, sigma=1.0)
test2 = rng.cnormal(g.mspincolor(grid), mu=1.0, sigma=1.0)
x_val = g.sum(g.trace(src * test1 * src * test2))
tmp1 = g.lattice(test1)
tmp1[:] = 0
tmp1[:, :, :, 1] = test1[:, :, :, 1]
tmp2 = g.lattice(test2)
tmp2[:] = 0
tmp2[:, :, :, 1] = test2[:, :, :, 1]