Example no. 1
def main():
    filename_end = sys.argv[1]

    check_laser(filename_end)

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
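
These snippets are WarpX regression-test analysis scripts, and they all assume a preamble along the lines of the one shown in full in Example no. 13. A minimal sketch (the exact import list varies per script):

#!/usr/bin/env python3

import os
import sys

import numpy as np
import yt

sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI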
Example no. 2
def check():
    filename = sys.argv[1]
    data_set_end = yt.load(filename)

    sim_time = data_set_end.current_time.to_value()

    # simulation results
    all_data = data_set_end.all_data()
    spec_names = [cc.name for cc in cases]

    # all momenta
    res_mom = np.array([
        np.array([
            all_data[sp, 'particle_momentum_x'].v[0],
            all_data[sp, 'particle_momentum_y'].v[0],
            all_data[sp, 'particle_momentum_z'].v[0]
        ]) for sp in spec_names
    ])

    for case, mom in zip(cases, res_mom):
        init_gamma = gamma(case.init_mom)
        end_gamma = gamma(mom / m_e / c)
        exp_gamma = exp_res(case, sim_time)

        error_rel = np.abs(end_gamma - exp_gamma) / exp_gamma

        print("error_rel    : " + str(error_rel))
        print("tolerance_rel: " + str(tolerance_rel))

        assert (error_rel < tolerance_rel)

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename)
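
The gamma helper above is not shown in the excerpt. A minimal sketch of what it presumably computes, assuming (as the call gamma(mom / m_e / c) suggests) that its argument is the momentum normalized to m_e * c:

import numpy as np

def gamma(u):
    # Lorentz factor from the normalized momentum u = p / (m_e c):
    # gamma = sqrt(1 + |u|^2)
    u = np.asarray(u)
    return np.sqrt(1.0 + np.dot(u, u))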
Example no. 3
def main():
    print("Opening yt output")
    filename_end = sys.argv[1]
    data_set_end = yt.load(filename_end)

    # get simulation time
    sim_time = data_set_end.current_time.to_value()

    # get particle data
    all_data_end = data_set_end.all_data()
    particle_data = {}

    names, types = ac.get_all_species_names_and_types()
    for spec_name, is_photon in zip(names, types):
        data = {}
        data["px"] = all_data_end[spec_name, "particle_momentum_x"].v
        data["py"] = all_data_end[spec_name, "particle_momentum_y"].v
        data["pz"] = all_data_end[spec_name, "particle_momentum_z"].v
        data["w"] = all_data_end[spec_name, "particle_weighting"].v

        if is_photon:
            data["opt"] = all_data_end[spec_name, "particle_opticalDepthBW"].v
        else:
            data["opt"] = all_data_end[spec_name, "particle_opticalDepthQSR"].v

        particle_data[spec_name] = data

    ac.check(sim_time, particle_data)

    test_name = filename_end[:-9] # Could also be os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
Example no. 4
def check():
    filename = sys.argv[1]
    data_set = yt.load(filename)

    all_data = data_set.all_data()
    res_ele_tau = all_data["electrons", 'particle_optical_depth_QSR']
    res_pos_tau = all_data["positrons", 'particle_optical_depth_QSR']

    loc_ele, scale_ele = st.expon.fit(res_ele_tau)
    loc_pos, scale_pos = st.expon.fit(res_pos_tau)

    # loc should be very close to 0, scale should be very close to 1
    error_rel = np.abs(loc_ele - 0)
    print("error_rel loc_ele: " + str(error_rel))
    assert (error_rel < tolerance_rel)

    error_rel = np.abs(loc_pos - 0)
    print("error_rel loc_pos: " + str(error_rel))
    assert (error_rel < tolerance_rel)

    error_rel = np.abs(scale_ele - 1)
    print("error_rel scale_ele: " + str(error_rel))
    assert (error_rel < tolerance_rel)

    error_rel = np.abs(scale_pos - 1)
    print("error_rel scale_pos: " + str(error_rel))
    assert (error_rel < tolerance_rel)

    test_name = filename[:-9]  # Could also be os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename)
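
These checks rest on scipy.stats.expon.fit recovering loc ~ 0 and scale ~ 1 from samples of a unit exponential (the expected distribution of the QED optical depths). A self-contained sanity check of that idea:

import numpy as np
import scipy.stats as st

rng = np.random.default_rng(0)
samples = rng.exponential(scale=1.0, size=100_000)  # optical depths ~ Exp(1)
loc, scale = st.expon.fit(samples)
print(loc, scale)  # loc ~ 0, scale ~ 1 up to sampling noise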
Example no. 5
def check():
    filename_end = sys.argv[1]
    data_set_end = yt.load(filename_end)

    sim_time = data_set_end.current_time.to_value()
    all_data_end = data_set_end.all_data()

    for idx in range(4):
        phot_name = spec_names_phot[idx]
        ele_name = spec_names_ele[idx]
        pos_name = spec_names_pos[idx]
        p0 = initial_momenta[idx]

        p2_phot = p0[0]**2 + p0[1]**2 + p0[2]**2
        p_phot = np.sqrt(p2_phot)
        energy_phot = p_phot * c
        chi_phot = calc_chi_gamma(p0, E_f, B_f)
        gamma_phot = np.linalg.norm(p0) / mec

        print("** Case {:d} **".format(idx + 1))
        print("  initial momentum: ", p0)
        print("  quantum parameter: {:f}".format(chi_phot))
        print("  normalized photon energy: {:f}".format(gamma_phot))
        print("  timestep: {:f} fs".format(sim_time * 1e15))

        phot_data = get_spec(all_data_end, phot_name, is_photon=True)
        ele_data = get_spec(all_data_end, ele_name, is_photon=False)
        pos_data = get_spec(all_data_end, pos_name, is_photon=False)

        p2_ele = ele_data["px"]**2 + ele_data["py"]**2 + ele_data["pz"]**2
        p_ele = np.sqrt(p2_ele)
        energy_ele = np.sqrt(1.0 + p2_ele / mec**2) * mec2
        p2_pos = pos_data["px"]**2 + pos_data["py"]**2 + pos_data["pz"]**2
        p_pos = np.sqrt(p2_pos)
        energy_pos = np.sqrt(1.0 + p2_pos / mec**2) * mec2

        n_lost = check_number_of_pairs(data_set_end, phot_name, ele_name,
                                       pos_name, chi_phot, gamma_phot,
                                       sim_time, initial_particle_number)

        check_weights(phot_data, ele_data, pos_data)

        check_momenta(phot_data, ele_data, pos_data, p0, p_ele, p_pos)

        check_energy(energy_phot, energy_ele, energy_pos)

        check_energy_distrib(energy_ele, energy_pos, gamma_phot, chi_phot,
                             n_lost, NNS[idx], idx)

        check_opt_depths(phot_data, ele_data, pos_data)

        print("*************\n")

    test_name = filename_end[:-9]  # Could also be os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
Example no. 6
def check():
    filename_end = sys.argv[1]
    data_set_end = yt.load(filename_end)

    sim_time = data_set_end.current_time.to_value()
    # no particles can be created on the first timestep so we have 2 timesteps in the test case,
    # with only the second one resulting in particle creation
    dt = sim_time / 2.

    all_data_end = data_set_end.all_data()

    for idx in range(4):
        part_name = spec_names[idx]
        phot_name = spec_names_phot[idx]
        t_pi = initial_momenta[idx]
        pm = boris(t_pi, -dt * 0.5, csign[idx])
        p0 = boris(pm, dt * 1.0, csign[idx])

        p2_part = p0[0]**2 + p0[1]**2 + p0[2]**2
        energy_part = np.sqrt(mec2**2 + p2_part * c**2)
        gamma_part = energy_part / mec2
        chi_part = calc_chi_part(p0, E_f, B_f)

        print("** Case {:d} **".format(idx + 1))
        print("  initial momentum: ", t_pi)
        print("  quantum parameter: {:f}".format(chi_part))
        print("  normalized particle energy: {:f}".format(gamma_part))
        print("  timestep: {:f} fs".format(dt * 1e15))

        part_data_final = get_spec(all_data_end, part_name, is_photon=False)
        phot_data = get_spec(all_data_end, phot_name, is_photon=True)

        p_phot = np.sqrt(phot_data["px"]**2 + phot_data["py"]**2 +
                         phot_data["pz"]**2)
        energy_phot = p_phot * c
        gamma_phot = energy_phot / mec2

        n_phot = check_number_of_photons(data_set_end, part_name, phot_name,
                                         chi_part, gamma_part, dt,
                                         initial_particle_number)

        check_weights(part_data_final, phot_data)

        check_momenta(phot_data, p_phot, p0)

        check_energy_distrib(gamma_phot, chi_part, gamma_part, n_phot,
                             NNS[idx], idx)

        check_opt_depths(part_data_final, phot_data)

        print("*************\n")

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
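
The boris helper above shifts the initial momentum by half a timestep before the energy is evaluated, but its body is not shown in the excerpt. A minimal sketch of a standard Boris momentum update in uniform fields, under the assumption of a hypothetical signature boris(p, dt, charge_sign, E, B) with SI momentum and an electron-mass particle (the excerpt's version presumably reads E_f and B_f from module scope):

import numpy as np
from scipy.constants import c, e, m_e

def boris(p, dt, charge_sign, E, B):
    # Standard Boris push: half electric kick, magnetic rotation, half kick.
    q = charge_sign * e
    p_minus = p + 0.5 * q * E * dt
    gam = np.sqrt(1.0 + np.dot(p_minus, p_minus) / (m_e * c)**2)
    t = q * B * dt / (2.0 * gam * m_e)
    s = 2.0 * t / (1.0 + np.dot(t, t))
    p_prime = p_minus + np.cross(p_minus, t)
    p_plus = p_minus + np.cross(p_prime, s)
    return p_plus + 0.5 * q * E * dt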
Example no. 7
def check():
    filename = sys.argv[1]
    data_set_end = yt.load(filename)

    sim_time = data_set_end.current_time.to_value()

    # expected positions list
    ll = sim_time * c
    answ_pos = init_pos + ll * gamma_beta_list / np.linalg.norm(
        gamma_beta_list, axis=1, keepdims=True)

    # expected momenta list
    answ_mom = m_e * c * gamma_beta_list  # momenta don't change

    # simulation results
    all_data = data_set_end.all_data()
    res_pos = [
        np.array([
            all_data[sp, 'particle_position_x'].v[0],
            all_data[sp, 'particle_position_y'].v[0],
            all_data[sp, 'particle_position_z'].v[0]
        ]) for sp in spec_names
    ]
    res_mom = [
        np.array([
            all_data[sp, 'particle_momentum_x'].v[0],
            all_data[sp, 'particle_momentum_y'].v[0],
            all_data[sp, 'particle_momentum_z'].v[0]
        ]) for sp in spec_names
    ]

    # check discrepancies
    disc_pos = [
        np.linalg.norm(a - b) / np.linalg.norm(b)
        for a, b in zip(res_pos, answ_pos)
    ]
    disc_mom = [
        np.linalg.norm(a - b) / np.linalg.norm(b)
        for a, b in zip(res_mom, answ_mom)
    ]

    print("max(disc_pos) = %s" % max(disc_pos))
    print("tol_pos = %s" % tol_pos)
    print("max(disc_mom) = %s" % max(disc_mom))
    print("tol_mom = %s" % tol_mom)

    assert ((max(disc_pos) <= tol_pos) and (max(disc_mom) <= tol_mom))

    test_name = filename[:-9]  # Could also be os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename)
Example no. 8
def check():
    filename_end = sys.argv[1]
    data_set_end = yt.load(filename_end)

    sim_time = data_set_end.current_time.to_value()
    all_data_end = data_set_end.all_data()

    for name in spec_names:
        opt_depth = all_data_end[name, 'particle_optical_depth_BW']

        # check that the distribution is still exponential with scale 1 and loc 0
        opt_depth_loc, opt_depth_scale = st.expon.fit(opt_depth)
        exp_loc = 0.0
        exp_scale = 1.0
        loc_discrepancy = np.abs(opt_depth_loc-exp_loc)
        scale_discrepancy = np.abs(opt_depth_scale-exp_scale)
        print("tolerance_rel: " + str(tol))
        print("species " + name)
        print("exp distrib loc tol = " + str(tol))
        print("exp distrib loc discrepancy = " + str(loc_discrepancy))
        assert(loc_discrepancy < tol)
        print("exp distrib scale tol = " + str(tol))
        print("exp distrib scale discrepancy = " + str(scale_discrepancy/exp_scale))
        assert(scale_discrepancy/exp_scale < tol)
        ###

        # check that the number of lost photons is n0 * (1 - exp(-rate * t))
        dNBW_dt_theo = dNBW_dt(
            calc_chi_gamma(p_begin[name], E_f, B_f),
            np.linalg.norm(p_begin[name] * speed_of_light))
        exp_lost = initial_particle_number * (1.0 - np.exp(-dNBW_dt_theo * sim_time))
        lost = initial_particle_number - np.size(opt_depth)
        discrepancy_lost = np.abs(exp_lost - lost)
        print("lost fraction tol = " + str(tol))
        print("lost fraction discrepancy = " + str(discrepancy_lost/exp_lost))
        assert(discrepancy_lost/exp_lost < tol)
        ###

    test_name = filename_end[:-9] # Could also be os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
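
Both checks above follow from the memorylessness of the exponential: photons start with optical depth tau ~ Exp(1), a photon converts once the accumulated rate * t exceeds its tau, so the survivors' remaining depths are again Exp(1) and the expected number lost is n0 * (1 - exp(-rate * t)). A sketch with made-up numbers:

import numpy as np
import scipy.stats as st

rng = np.random.default_rng(1)
n0 = 100_000
rate_t = 0.3                            # dNBW_dt * t (made up)
tau = rng.exponential(1.0, n0)          # initial optical depths ~ Exp(1)
survivors = tau[tau > rate_t] - rate_t  # remaining depth of surviving photons
loc, scale = st.expon.fit(survivors)    # ~ (0, 1): still exponential
lost = n0 - survivors.size              # ~ n0 * (1 - exp(-rate_t))
print(loc, scale, lost, n0 * (1.0 - np.exp(-rate_t)))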
Example no. 9
def main():
    filename_end = sys.argv[1]
    filename_start = filename_end[:-4] + '0000'
    ds_end = yt.load(filename_end)
    ds_start = yt.load(filename_start)
    ad_end = ds_end.all_data()
    ad_start = ds_start.all_data()
    field_data_end = ds_end.covering_grid(level=0,
                                          left_edge=ds_end.domain_left_edge,
                                          dims=ds_end.domain_dimensions)
    field_data_start = ds_start.covering_grid(
        level=0,
        left_edge=ds_start.domain_left_edge,
        dims=ds_start.domain_dimensions)

    ntests = 5
    for i in range(1, ntests + 1):
        proton_species = "proton" + str(i)
        boron_species = "boron" + str(i)
        alpha_species = "alpha" + str(i)
        data = {}
        add_species_to_dict(ad_start, data, proton_species, "proton", "start")
        add_species_to_dict(ad_start, data, boron_species, "boron", "start")
        add_species_to_dict(ad_end, data, proton_species, "proton", "end")
        add_species_to_dict(ad_end, data, boron_species, "boron", "end")
        add_species_to_dict(ad_end, data, alpha_species, "alpha", "end")

        # General checks that are performed for all tests
        generic_check(data)

        # Checks that are specific to test number i (see the eval-free
        # dispatch sketch after this example)
        eval("specific_check" + str(i) + "(data)")

    rho_start = field_data_start["rho"].to_ndarray()
    rho_end = field_data_end["rho"].to_ndarray()
    check_charge_conservation(rho_start, rho_end)

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
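
A sketch of the same dispatch without eval, assuming specific_check1 through specific_check5 are module-level functions:

# inside the loop over i, equivalent to the eval call above:
globals()["specific_check" + str(i)](data)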
Example no. 10
def check():
    filename = sys.argv[1]
    data_set = yt.load(filename)

    all_data = data_set.all_data()
    res_tau = all_data["photons", 'particle_optical_depth_BW']

    loc, scale = st.expon.fit(res_tau)

    # loc should be very close to 0, scale should be very close to 1

    error_rel = np.abs(loc - 0)
    print("error_rel for location: " + str(error_rel))
    print("tolerance_rel: " + str(tolerance_rel))
    assert (error_rel < tolerance_rel)

    error_rel = np.abs(scale - 1)
    print("error_rel for scale: " + str(error_rel))
    print("tolerance_rel: " + str(tolerance_rel))
    assert (error_rel < tolerance_rel)

    test_name = filename[:-9]  # Could also be os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename)
Example no. 11
def main():
    print("Opening yt output")
    filename_end = sys.argv[1]
    data_set_end = yt.load(filename_end)

    # get simulation time
    sim_time = data_set_end.current_time.to_value()
    # no particles can be created on the first timestep so we have 2 timesteps in the test case,
    # with only the second one resulting in particle creation
    dt = sim_time/2.

    # get particle data
    all_data_end = data_set_end.all_data()
    particle_data = {}

    names, types = ac.get_all_species_names_and_types()
    for spec_name, is_photon in zip(names, types):
        data = {}
        data["px"] = all_data_end[spec_name, "particle_momentum_x"].v
        data["py"] = all_data_end[spec_name, "particle_momentum_y"].v
        data["pz"] = all_data_end[spec_name, "particle_momentum_z"].v
        data["w"] = all_data_end[spec_name, "particle_weighting"].v

        if is_photon:
            data["opt"] = all_data_end[spec_name, "particle_opticalDepthBW"].v
        else:
            data["opt"] = all_data_end[spec_name, "particle_opticalDepthQSR"].v

        particle_data[spec_name] = data

    ac.check(dt, particle_data)

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)
Example no. 12
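# NOTE: this excerpt starts inside nested loops over the grid indices i, j, k
# (the loop headers and the definitions of x0/y0/z0, theta, h_2, etc. are not
# included in the excerpt).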
            y = y0 * np.cos(-theta) - z0 * np.sin(-theta)
            z = y0 * np.sin(-theta) + z0 * np.cos(-theta)

            By = -2 / h_2 * mu_0 * (n * pi / Ly) * (p * pi / Lz) * (
                np.cos(m * pi / Lx * (x - Lx / 2))
                * np.sin(n * pi / Ly * (y - Ly / 2))
                * np.cos(p * pi / Lz * (z - Lz / 2))
                * np.cos(np.sqrt(2) * np.pi / Lx * c * t))

            Bz = mu_0 * (np.cos(m * pi / Lx * (x - Lx / 2))
                         * np.cos(n * pi / Ly * (y - Ly / 2))
                         * np.sin(p * pi / Lz * (z - Lz / 2))
                         * np.cos(np.sqrt(2) * np.pi / Lx * c * t))

            Bz_th[i, j, k] = (By * np.sin(theta) +
                              Bz * np.cos(theta)) * (Bz_sim[i, j, k, 0] != 0)

# Compute relative l^2 error on By
rel_err_y = np.sqrt(
    np.sum(np.square(By_sim[:, :, :, 0] - By_th)) / np.sum(np.square(By_th)))
assert (rel_err_y < rel_tol_err)
# Compute relative l^2 error on Bz
rel_err_z = np.sqrt(
    np.sum(np.square(Bz_sim[:, :, :, 0] - Bz_th)) / np.sum(np.square(Bz_th)))
assert (rel_err_z < rel_tol_err)

test_name = os.path.split(os.getcwd())[1]

checksumAPI.evaluate_checksum(test_name, filename)
Example no. 13
#!/usr/bin/env python3

import os
import sys

sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI

# this will be the name of the plot file
fn = sys.argv[1]

# Get name of the test
test_name = os.path.split(os.getcwd())[1]

# Run checksum regression test
checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-2)
Example no. 14
def do_analysis(single_precision=False):
    fn = sys.argv[1]

    ds = yt.load(fn)
    ad = ds.all_data()
    ad0 = ds.covering_grid(level=0,
                           left_edge=ds.domain_left_edge,
                           dims=ds.domain_dimensions)

    #--------------------------------------------------------------------------------------------------
    # Part 1: get results from plotfiles (label '_yt')
    #--------------------------------------------------------------------------------------------------

    # Quantities computed from plotfiles
    values_yt = dict()

    domain_size = ds.domain_right_edge.value - ds.domain_left_edge.value
    dx = domain_size / ds.domain_dimensions

    # Electrons
    x = ad['electrons', 'particle_position_x'].to_ndarray()
    y = ad['electrons', 'particle_position_y'].to_ndarray()
    z = ad['electrons', 'particle_position_z'].to_ndarray()
    uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c
    w = ad['electrons', 'particle_weight'].to_ndarray()

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    values_yt['electrons: zavg'] = zavg / wavg_adj
    values_yt['electrons: uzavg'] = uzavg / wavg_adj
    values_yt['electrons: zuzavg'] = zuzavg / wavg_adj

    # protons
    x = ad['protons', 'particle_position_x'].to_ndarray()
    y = ad['protons', 'particle_position_y'].to_ndarray()
    z = ad['protons', 'particle_position_z'].to_ndarray()
    uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c
    w = ad['protons', 'particle_weight'].to_ndarray()

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    values_yt['protons: zavg'] = zavg / wavg_adj
    values_yt['protons: uzavg'] = uzavg / wavg_adj
    values_yt['protons: zuzavg'] = zuzavg / wavg_adj

    # Photons (momentum in units of m_e c)
    x = ad['photons', 'particle_position_x'].to_ndarray()
    y = ad['photons', 'particle_position_y'].to_ndarray()
    z = ad['photons', 'particle_position_z'].to_ndarray()
    uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c
    w = ad['photons', 'particle_weight'].to_ndarray()

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    values_yt['photons: zavg'] = zavg / wavg_adj
    values_yt['photons: uzavg'] = uzavg / wavg_adj
    values_yt['photons: zuzavg'] = zuzavg / wavg_adj

    #--------------------------------------------------------------------------------------------------
    # Part 2: get results from reduced particle diagnostics (label '_rd')
    #--------------------------------------------------------------------------------------------------

    values_rd = dict()
    # Load reduced particle diagnostic data from plotfiles
    values_rd['electrons: zavg'] = ad0[('boxlib', 'z_electrons')]
    values_rd['protons: zavg'] = ad0[('boxlib', 'z_protons')]
    values_rd['photons: zavg'] = ad0[('boxlib', 'z_photons')]

    values_rd['electrons: uzavg'] = ad0[('boxlib', 'uz_electrons')]
    values_rd['protons: uzavg'] = ad0[('boxlib', 'uz_protons')]
    values_rd['photons: uzavg'] = ad0[('boxlib', 'uz_photons')]

    values_rd['electrons: zuzavg'] = ad0[('boxlib', 'zuz_electrons')]
    values_rd['protons: zuzavg'] = ad0[('boxlib', 'zuz_protons')]
    values_rd['photons: zuzavg'] = ad0[('boxlib', 'zuz_photons')]

    #--------------------------------------------------------------------------------------------------
    # Part 3: compare values from plotfiles and diagnostics and print output
    #--------------------------------------------------------------------------------------------------

    error = dict()
    tolerance = 5e-3 if single_precision else 1e-12

    for k in values_yt.keys():
        # check that the zeros line up, since we'll be ignoring them in the error calculation
        assert (np.all((values_yt[k] == 0) == (values_rd[k] == 0)))
        error[k] = np.max(
            abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] /
            abs(values_yt[k])[values_yt[k] != 0])
        print(k, 'relative error = ', error[k])
        assert (error[k] < tolerance)

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, fn)
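
The per-particle deposition loops above run in pure Python; a sketch of the same weighted per-cell accumulation vectorized with np.add.at (which handles repeated indices correctly, unlike plain fancy-index assignment):

import numpy as np

def deposit(shape, x_ind, y_ind, z_ind, z, uz, w):
    # Weighted per-cell sums, equivalent to the explicit particle loops above.
    zavg, uzavg = np.zeros(shape), np.zeros(shape)
    zuzavg, wavg = np.zeros(shape), np.zeros(shape)
    idx = (x_ind, y_ind, z_ind)
    np.add.at(zavg, idx, z * w)
    np.add.at(uzavg, idx, uz * w)
    np.add.at(zuzavg, idx, z * uz * w)
    np.add.at(wavg, idx, w)
    return zavg, uzavg, zuzavg, wavg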
Example no. 15
#!/usr/bin/env python3

# Copyright 2021 Modern Electron

# This script checks that the PICMI_inputs_2d.py run matches the results of
# the non-PICMI run to within a loose tolerance. The PICMI run uses an
# external Poisson solver that solves the Poisson equation directly by matrix
# inversion, rather than with the iterative approach of the MLMG solver.

import sys

sys.path.append('../../../../warpx/Regression/Checksum/')

import checksumAPI

my_check = checksumAPI.evaluate_checksum(
    'background_mcc', 'Python_background_mcc_plt00050',
    do_particles=True, rtol=3.7e-3
)
Example no. 16
error = error / nt

print('error = ', error)
print('tolerance = ', tolerance)
assert(error < tolerance)


## In the second part of the test, we verify that the diagnostic particle filter function works as
## expected. For this, we only use the last simulation timestep.

dim = "2d"
species_name = "electron"

parser_filter_fn = "diags/diag_parser_filter" + last_it
parser_filter_expression = "(x>200) * (z<200) * (px-3*pz>0)"
post_processing_utils.check_particle_filter(last_fn, parser_filter_fn, parser_filter_expression,
                                            dim, species_name)

uniform_filter_fn = "diags/diag_uniform_filter" + last_it
uniform_filter_expression = "ids%6 == 0"
post_processing_utils.check_particle_filter(last_fn, uniform_filter_fn, uniform_filter_expression,
                                            dim, species_name)

random_filter_fn = "diags/diag_random_filter" + last_it
random_fraction = 0.77
post_processing_utils.check_random_filter(last_fn, random_filter_fn, random_fraction,
                                          dim, species_name)

test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn, do_particles=False)
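
A parser filter expression such as "(x>200) * (z<200) * (px-3*pz>0)" keeps the particles for which every factor is true; a sketch of the equivalent numpy mask over hypothetical particle arrays:

import numpy as np

rng = np.random.default_rng(2)
n = 1000
x, z = rng.uniform(0, 400, n), rng.uniform(0, 400, n)  # hypothetical positions
px, pz = rng.normal(size=n), rng.normal(size=n)        # hypothetical momenta

mask = (x > 200) & (z < 200) & (px - 3 * pz > 0)
print(mask.sum(), "of", n, "particles pass the filter")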
Example no. 17
#!/usr/bin/env python3

import os
import re
import sys

sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI

# this will be the name of the plot file
fn = sys.argv[1]

# Get name of the test
test_name = os.path.split(os.getcwd())[1]

# Run checksum regression test
if re.search('single_precision', fn):
    checksumAPI.evaluate_checksum(test_name, fn, rtol=2.e-6)
else:
    checksumAPI.evaluate_checksum(test_name, fn)
Example no. 18
#!/usr/bin/env python3

# Run the default regression test for the PICMI version of the EB test
# using the same reference file as for the non-PICMI test since the two
# tests are otherwise the same.

import sys

sys.path.append('../../../../warpx/Regression/Checksum/')

import checksumAPI

my_check = checksumAPI.evaluate_checksum(
    'ElectrostaticSphereEB',
    'Python_ElectrostaticSphereEB_plt000001',
    do_particles=False,
    atol=1e-12)
Example no. 19
plt.title('Ex: Simulation')
plt.imshow(make_2d(Ex_array))
plt.colorbar()
plt.subplot(223)
plt.title('Ey: Theory')
plt.imshow(make_2d(Ey_th))
plt.colorbar()
plt.subplot(224)
plt.title('Ey: Simulation')
plt.imshow(make_2d(Ey_array))
plt.colorbar()
plt.savefig('Comparison.png')


# Automatically check the results
def check(E, E_th, label):
    print('Relative error in %s: %.3f' %
          (label, abs(E - E_th).max() / E_th.max()))
    tolerance_rel = 0.1
    print("tolerance_rel: " + str(tolerance_rel))
    assert np.allclose(E, E_th, atol=tolerance_rel * E_th.max())


check(Ex_array, Ex_th, 'Ex')
check(Ey_array, Ey_th, 'Ey')
if ds.dimensionality == 3:
    check(Ez_array, Ez_th, 'Ez')

test_name = filename[:-9]  # Could also be os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, filename, do_particles=0)
Example no. 20
def do_analysis(single_precision=False):
    fn = sys.argv[1]

    ds = yt.load(fn)
    ad = ds.all_data()

    #--------------------------------------------------------------------------------------------------
    # Part 1: get results from plotfiles (label '_yt')
    #--------------------------------------------------------------------------------------------------

    # Quantities computed from plotfiles
    values_yt = dict()

    # Electrons
    px = ad['electrons', 'particle_momentum_x'].to_ndarray()
    py = ad['electrons', 'particle_momentum_y'].to_ndarray()
    pz = ad['electrons', 'particle_momentum_z'].to_ndarray()
    w = ad['electrons', 'particle_weight'].to_ndarray()
    p2 = px**2 + py**2 + pz**2

    # Accumulate particle energy, store number of particles and sum of weights
    e_u2 = p2 / (m_e**2 * c**2)
    e_gamma = np.sqrt(1 + e_u2)
    e_energy = (m_e * c**2) * np.where(
        e_gamma > gamma_relativistic_threshold, e_gamma - 1,
        (e_u2) / 2 - (e_u2**2) / 8 + (e_u2**3) / 16 - (e_u2**4) * (5 / 128) +
        (e_u2**5) * (7 / 256))
    values_yt['electrons: particle energy'] = np.sum(e_energy * w)
    values_yt['electrons: particle momentum in x'] = np.sum(px * w)
    values_yt['electrons: particle momentum in y'] = np.sum(py * w)
    values_yt['electrons: particle momentum in z'] = np.sum(pz * w)
    values_yt['electrons: number of particles'] = w.shape[0]
    values_yt['electrons: sum of weights'] = np.sum(w)

    # Protons
    px = ad['protons', 'particle_momentum_x'].to_ndarray()
    py = ad['protons', 'particle_momentum_y'].to_ndarray()
    pz = ad['protons', 'particle_momentum_z'].to_ndarray()
    w = ad['protons', 'particle_weight'].to_ndarray()
    p2 = px**2 + py**2 + pz**2

    # Accumulate particle energy, store number of particles and sum of weights
    p_u2 = p2 / (m_p**2 * c**2)
    p_gamma = np.sqrt(1 + p_u2)
    p_energy = (m_p * c**2) * np.where(
        p_gamma > gamma_relativistic_threshold, p_gamma - 1,
        (p_u2) / 2 - (p_u2**2) / 8 + (p_u2**3) / 16 - (p_u2**4) * (5 / 128) +
        (p_u2**5) * (7 / 256))
    values_yt['protons: particle energy'] = np.sum(p_energy * w)
    values_yt['protons: particle momentum in x'] = np.sum(px * w)
    values_yt['protons: particle momentum in y'] = np.sum(py * w)
    values_yt['protons: particle momentum in z'] = np.sum(pz * w)
    values_yt['protons: number of particles'] = w.shape[0]
    values_yt['protons: sum of weights'] = np.sum(w)

    # Photons
    px = ad['photons', 'particle_momentum_x'].to_ndarray()
    py = ad['photons', 'particle_momentum_y'].to_ndarray()
    pz = ad['photons', 'particle_momentum_z'].to_ndarray()
    w = ad['photons', 'particle_weight'].to_ndarray()
    p2 = px**2 + py**2 + pz**2

    # Accumulate particle energy, store number of particles and sum of weights
    values_yt['photons: particle energy'] = np.sum(np.sqrt(p2 * c**2) * w)
    values_yt['photons: particle momentum in x'] = np.sum(px * w)
    values_yt['photons: particle momentum in y'] = np.sum(py * w)
    values_yt['photons: particle momentum in z'] = np.sum(pz * w)
    values_yt['photons: number of particles'] = w.shape[0]
    values_yt['photons: sum of weights'] = np.sum(w)

    # Accumulate total particle diagnostics

    values_yt['particle energy'] = values_yt['electrons: particle energy'] \
                                + values_yt['protons: particle energy'] \
                                + values_yt['photons: particle energy']

    values_yt['particle momentum in x'] = values_yt['electrons: particle momentum in x'] \
                                        + values_yt['protons: particle momentum in x'] \
                                        + values_yt['photons: particle momentum in x']

    values_yt['particle momentum in y'] = values_yt['electrons: particle momentum in y'] \
                                        + values_yt['protons: particle momentum in y'] \
                                        + values_yt['photons: particle momentum in y']

    values_yt['particle momentum in z'] = values_yt['electrons: particle momentum in z'] \
                                        + values_yt['protons: particle momentum in z'] \
                                        + values_yt['photons: particle momentum in z']

    values_yt['number of particles'] = values_yt['electrons: number of particles'] \
                                    + values_yt['protons: number of particles'] \
                                    + values_yt['photons: number of particles']

    values_yt['sum of weights'] = values_yt['electrons: sum of weights'] \
                                + values_yt['protons: sum of weights'] \
                                + values_yt['photons: sum of weights']

    values_yt['mean particle energy'] = values_yt[
        'particle energy'] / values_yt['sum of weights']

    values_yt['mean particle momentum in x'] = values_yt[
        'particle momentum in x'] / values_yt['sum of weights']
    values_yt['mean particle momentum in y'] = values_yt[
        'particle momentum in y'] / values_yt['sum of weights']
    values_yt['mean particle momentum in z'] = values_yt[
        'particle momentum in z'] / values_yt['sum of weights']

    values_yt['electrons: mean particle energy'] = values_yt['electrons: particle energy'] \
                                                / values_yt['electrons: sum of weights']

    values_yt['electrons: mean particle momentum in x'] = values_yt['electrons: particle momentum in x'] \
                                                    / values_yt['electrons: sum of weights']
    values_yt['electrons: mean particle momentum in y'] = values_yt['electrons: particle momentum in y'] \
                                                    / values_yt['electrons: sum of weights']
    values_yt['electrons: mean particle momentum in z'] = values_yt['electrons: particle momentum in z'] \
                                                    / values_yt['electrons: sum of weights']

    values_yt['protons: mean particle energy'] = values_yt['protons: particle energy'] \
                                            / values_yt['protons: sum of weights']

    values_yt['protons: mean particle momentum in x'] = values_yt['protons: particle momentum in x'] \
                                                    / values_yt['protons: sum of weights']
    values_yt['protons: mean particle momentum in y'] = values_yt['protons: particle momentum in y'] \
                                                    / values_yt['protons: sum of weights']
    values_yt['protons: mean particle momentum in z'] = values_yt['protons: particle momentum in z'] \
                                                    / values_yt['protons: sum of weights']

    values_yt['photons: mean particle energy'] = values_yt['photons: particle energy'] \
                                            / values_yt['photons: sum of weights']

    values_yt['photons: mean particle momentum in x'] = values_yt['photons: particle momentum in x'] \
                                                    / values_yt['photons: sum of weights']
    values_yt['photons: mean particle momentum in y'] = values_yt['photons: particle momentum in y'] \
                                                    / values_yt['photons: sum of weights']
    values_yt['photons: mean particle momentum in z'] = values_yt['photons: particle momentum in z'] \
                                                    / values_yt['photons: sum of weights']

    # Load 3D data from plotfiles
    ad = ds.covering_grid(level=0,
                          left_edge=ds.domain_left_edge,
                          dims=ds.domain_dimensions)
    Ex = ad[('mesh', 'Ex')].to_ndarray()
    Ey = ad[('mesh', 'Ey')].to_ndarray()
    Ez = ad[('mesh', 'Ez')].to_ndarray()
    Bx = ad[('mesh', 'Bx')].to_ndarray()
    By = ad[('mesh', 'By')].to_ndarray()
    Bz = ad[('mesh', 'Bz')].to_ndarray()
    rho = ad[('boxlib', 'rho')].to_ndarray()
    rho_electrons = ad[('boxlib', 'rho_electrons')].to_ndarray()
    rho_protons = ad[('boxlib', 'rho_protons')].to_ndarray()
    x = ad[('boxlib', 'x')].to_ndarray()
    y = ad[('boxlib', 'y')].to_ndarray()
    z = ad[('boxlib', 'z')].to_ndarray()

    # Field energy
    E2 = np.sum(Ex**2) + np.sum(Ey**2) + np.sum(Ez**2)
    B2 = np.sum(Bx**2) + np.sum(By**2) + np.sum(Bz**2)
    N = np.array(ds.domain_width / ds.domain_dimensions)
    dV = N[0] * N[1] * N[2]
    values_yt['field energy'] = 0.5 * dV * (E2 * eps0 + B2 / mu0)
    values_yt['field momentum in x'] = eps0 * np.sum(Ey * Bz - Ez * By) * dV
    values_yt['field momentum in y'] = eps0 * np.sum(Ez * Bx - Ex * Bz) * dV
    values_yt['field momentum in z'] = eps0 * np.sum(Ex * By - Ey * Bx) * dV

    # Field energy in quarter of simulation domain
    E2 = np.sum((Ex**2 + Ey**2 + Ez**2) * (y > 0) * (z < 0))
    B2 = np.sum((Bx**2 + By**2 + Bz**2) * (y > 0) * (z < 0))
    values_yt['field energy in quarter of simulation domain'] = 0.5 * dV * (
        E2 * eps0 + B2 / mu0)

    # Max/min values of various grid quantities
    values_yt['maximum of |Ex|'] = np.amax(np.abs(Ex))
    values_yt['maximum of |Ey|'] = np.amax(np.abs(Ey))
    values_yt['maximum of |Ez|'] = np.amax(np.abs(Ez))
    values_yt['maximum of |Bx|'] = np.amax(np.abs(Bx))
    values_yt['maximum of |By|'] = np.amax(np.abs(By))
    values_yt['maximum of |Bz|'] = np.amax(np.abs(Bz))
    values_yt['maximum of |E|'] = np.amax(np.sqrt(Ex**2 + Ey**2 + Ez**2))
    values_yt['maximum of |B|'] = np.amax(np.sqrt(Bx**2 + By**2 + Bz**2))
    values_yt['maximum of rho'] = np.amax(rho)
    values_yt['minimum of rho'] = np.amin(rho)
    values_yt['electrons: maximum of |rho|'] = np.amax(np.abs(rho_electrons))
    values_yt['protons: maximum of |rho|'] = np.amax(np.abs(rho_protons))
    values_yt['maximum of |B| from generic field reduction'] = np.amax(
        np.sqrt(Bx**2 + By**2 + Bz**2))
    values_yt['minimum of x*Ey*Bz'] = np.amin(x * Ey * Bz)

    #--------------------------------------------------------------------------------------------------
    # Part 2: get results from reduced diagnostics (label '_rd')
    #--------------------------------------------------------------------------------------------------

    # Quantities computed from reduced diagnostics
    values_rd = dict()

    # Load data from output files
    EFdata = np.genfromtxt('./diags/reducedfiles/EF.txt')  # Field energy
    EPdata = np.genfromtxt('./diags/reducedfiles/EP.txt')  # Particle energy
    PFdata = np.genfromtxt('./diags/reducedfiles/PF.txt')  # Field momentum
    PPdata = np.genfromtxt('./diags/reducedfiles/PP.txt')  # Particle momentum
    MFdata = np.genfromtxt('./diags/reducedfiles/MF.txt')  # Field maximum
    MRdata = np.genfromtxt('./diags/reducedfiles/MR.txt')  # Rho maximum
    NPdata = np.genfromtxt('./diags/reducedfiles/NP.txt')  # Particle number
    FR_Maxdata = np.genfromtxt('./diags/reducedfiles/FR_Max.txt')  # Field Reduction using maximum
    FR_Mindata = np.genfromtxt('./diags/reducedfiles/FR_Min.txt')  # Field Reduction using minimum
    FR_Integraldata = np.genfromtxt('./diags/reducedfiles/FR_Integral.txt')  # Field Reduction using integral

    # First index "1" points to the values written at the last time step
    values_rd['field energy'] = EFdata[1][2]
    values_rd['field energy in quarter of simulation domain'] = FR_Integraldata[1][2]
    values_rd['particle energy'] = EPdata[1][2]
    values_rd['electrons: particle energy'] = EPdata[1][3]
    values_rd['protons: particle energy'] = EPdata[1][4]
    values_rd['photons: particle energy'] = EPdata[1][5]
    values_rd['mean particle energy'] = EPdata[1][6]
    values_rd['electrons: mean particle energy'] = EPdata[1][7]
    values_rd['protons: mean particle energy'] = EPdata[1][8]
    values_rd['photons: mean particle energy'] = EPdata[1][9]
    values_rd['field momentum in x'] = PFdata[1][2]
    values_rd['field momentum in y'] = PFdata[1][3]
    values_rd['field momentum in z'] = PFdata[1][4]
    values_rd['particle momentum in x'] = PPdata[1][2]
    values_rd['particle momentum in y'] = PPdata[1][3]
    values_rd['particle momentum in z'] = PPdata[1][4]
    values_rd['electrons: particle momentum in x'] = PPdata[1][5]
    values_rd['electrons: particle momentum in y'] = PPdata[1][6]
    values_rd['electrons: particle momentum in z'] = PPdata[1][7]
    values_rd['protons: particle momentum in x'] = PPdata[1][8]
    values_rd['protons: particle momentum in y'] = PPdata[1][9]
    values_rd['protons: particle momentum in z'] = PPdata[1][10]
    values_rd['photons: particle momentum in x'] = PPdata[1][11]
    values_rd['photons: particle momentum in y'] = PPdata[1][12]
    values_rd['photons: particle momentum in z'] = PPdata[1][13]
    values_rd['mean particle momentum in x'] = PPdata[1][14]
    values_rd['mean particle momentum in y'] = PPdata[1][15]
    values_rd['mean particle momentum in z'] = PPdata[1][16]
    values_rd['electrons: mean particle momentum in x'] = PPdata[1][17]
    values_rd['electrons: mean particle momentum in y'] = PPdata[1][18]
    values_rd['electrons: mean particle momentum in z'] = PPdata[1][19]
    values_rd['protons: mean particle momentum in x'] = PPdata[1][20]
    values_rd['protons: mean particle momentum in y'] = PPdata[1][21]
    values_rd['protons: mean particle momentum in z'] = PPdata[1][22]
    values_rd['photons: mean particle momentum in x'] = PPdata[1][23]
    values_rd['photons: mean particle momentum in y'] = PPdata[1][24]
    values_rd['photons: mean particle momentum in z'] = PPdata[1][25]
    values_rd['maximum of |Ex|'] = MFdata[1][2]
    values_rd['maximum of |Ey|'] = MFdata[1][3]
    values_rd['maximum of |Ez|'] = MFdata[1][4]
    values_rd['maximum of |E|'] = MFdata[1][5]
    values_rd['maximum of |Bx|'] = MFdata[1][6]
    values_rd['maximum of |By|'] = MFdata[1][7]
    values_rd['maximum of |Bz|'] = MFdata[1][8]
    values_rd['maximum of |B|'] = MFdata[1][9]
    values_rd['maximum of rho'] = MRdata[1][2]
    values_rd['minimum of rho'] = MRdata[1][3]
    values_rd['electrons: maximum of |rho|'] = MRdata[1][4]
    values_rd['protons: maximum of |rho|'] = MRdata[1][5]
    values_rd['number of particles'] = NPdata[1][2]
    values_rd['electrons: number of particles'] = NPdata[1][3]
    values_rd['protons: number of particles'] = NPdata[1][4]
    values_rd['photons: number of particles'] = NPdata[1][5]
    values_rd['sum of weights'] = NPdata[1][6]
    values_rd['electrons: sum of weights'] = NPdata[1][7]
    values_rd['protons: sum of weights'] = NPdata[1][8]
    values_rd['photons: sum of weights'] = NPdata[1][9]
    values_rd['maximum of |B| from generic field reduction'] = FR_Maxdata[1][2]
    values_rd['minimum of x*Ey*Bz'] = FR_Mindata[1][2]

    #--------------------------------------------------------------------------------------------------
    # Part 3: compare values from plotfiles and reduced diagnostics and print output
    #--------------------------------------------------------------------------------------------------

    error = dict()
    tolerance = 5e-3 if single_precision else 1e-12
    field_energy_tolerance = 0.3

    # The comparison of field energies requires a large tolerance,
    # because the field energy from the plotfiles is computed from cell-centered data,
    # while the field energy from the reduced diagnostics is computed from (Yee) staggered data.
    for k in values_yt.keys():
        print()
        print('values_yt[' + k + '] = ', values_yt[k])
        print('values_rd[' + k + '] = ', values_rd[k])
        error[k] = abs(values_yt[k] - values_rd[k]) / abs(values_yt[k])
        print('relative error = ', error[k])
        tol = field_energy_tolerance if (k == 'field energy') else tolerance
        assert (error[k] < tol)
        print()

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, fn)
Example no. 21
# Check that the number of particles at the level weight is the same as predicted from analytic
# calculations
# np.argmax returns the first index for which w > level_weight, which thus
# corresponds to the number of particles at the level weight
numparts_leveled = np.argmax(w > level_weight)
expected_numparts_leveled = numparts_init / (2. * t_r) * (
    1 + erf(expected_mean_initial_weight * (t_r - 1.))
    - 1. / (np.sqrt(np.pi) * expected_mean_initial_weight)
    * np.exp(-(expected_mean_initial_weight * (t_r - 1.))**2))
error = np.abs(numparts_leveled - expected_numparts_leveled)
std_numparts_leveled = np.sqrt(
    expected_numparts_leveled
    - numparts_init / np.sqrt(np.pi) / (t_r * expected_mean_initial_weight)**2
    * (np.sqrt(np.pi) / 4. * (2. * expected_mean_initial_weight**2 + 1.)
       * (1. - erf(expected_mean_initial_weight * (t_r - 1.)))
       - 0.5 * np.exp(-(expected_mean_initial_weight * (t_r - 1.))**2
                      * (expected_mean_initial_weight * (t_r + 1.)))))
# 5 sigma test that has an intrinsic probability to fail of about 1 in 2 million
print(
    "difference between expected and actual number of leveled particles (2nd species): "
    + str(error))
print("tolerance: " + str(5 * std_numparts_leveled))
assert (error < 5 * std_numparts_leveled)

numparts_unaffected = w.shape[0] - numparts_leveled
numparts_unaffected_anticipated = w0.shape[0] - np.argmax(w0 > level_weight)
# Check that number of particles with weight higher than level weight is the same before and after
# resampling
assert (numparts_unaffected == numparts_unaffected_anticipated)
# Check that particles with weight higher than level weight are unaffected by resampling.
assert (np.all(w[-numparts_unaffected:] == w0[-numparts_unaffected:]))

test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn_final)
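
The argmax idiom above works because np.argmax on a boolean array returns the index of the first True; counting the particles below the level weight this way implicitly presumes w is sorted in increasing order. A tiny illustration:

import numpy as np

w = np.array([0.5, 0.8, 1.0, 2.5, 3.0])  # weights, sorted increasing
level_weight = 1.5
print(np.argmax(w > level_weight))  # -> 3, the count of weights below 1.5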
Example no. 22
# NOTE: this excerpt starts mid-script; Ex (used below) is restored here, and
# EPyt (the particle energy computed from yt data) is defined in the omitted part.
Ex = ad['Ex'].to_ndarray()
Ey = ad['Ey'].to_ndarray()
Ez = ad['Ez'].to_ndarray()
Bx = ad['Bx'].to_ndarray()
By = ad['By'].to_ndarray()
Bz = ad['Bz'].to_ndarray()
Es = np.sum(Ex**2)+np.sum(Ey**2)+np.sum(Ez**2)
Bs = np.sum(Bx**2)+np.sum(By**2)+np.sum(Bz**2)
N  = np.array( ds.domain_width / ds.domain_dimensions )
dV = N[0]*N[1]*N[2]
EFyt = 0.5*Es*scc.epsilon_0*dV + 0.5*Bs/scc.mu_0*dV

# PART2: get results from reduced diagnostics

EFdata = np.genfromtxt("./diags/reducedfiles/EF.txt")
EPdata = np.genfromtxt("./diags/reducedfiles/EP.txt")
EF = EFdata[1][2]
EP = EPdata[1][2]

# PART3: print and assert

print('difference of field energy:', abs(EFyt-EF))
print('tolerance of field energy:', 1.0e-3)
print('difference of particle energy:', abs(EPyt-EP))
print('tolerance of particle energy:', 1.0e-8)

assert(abs(EFyt-EF) < 1.0e-3)
assert(abs(EPyt-EP) < 1.0e-8)

test_name = fn[:-9] # Could also be os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn)
Example no. 23
plt.tight_layout()
plt.savefig('langmuir_multi_analysis.png')

tolerance_rel = 0.15

print("error_rel    : " + str(error_rel))
print("tolerance_rel: " + str(tolerance_rel))

assert (error_rel < tolerance_rel)

# Check relative L-infinity spatial norm of rho/epsilon_0 - div(E) when
# current correction (psatd.do_current_correction=1) is applied or when
# Vay current deposition (algo.current_deposition=vay) is used
if current_correction or vay_deposition:
    rho = data['rho'].to_ndarray()
    divE = data['divE'].to_ndarray()
    error_rel = np.amax(np.abs(divE - rho / epsilon_0)) / np.amax(
        np.abs(rho / epsilon_0))
    tolerance = 1.e-9
    print("Check charge conservation:")
    print("error_rel = {}".format(error_rel))
    print("tolerance = {}".format(tolerance))
    assert (error_rel < tolerance)

test_name = fn[:-9]  # Could also be os.path.split(os.getcwd())[1]

if re.search('single_precision', fn):
    checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3)
else:
    checksumAPI.evaluate_checksum(test_name, fn)
Example no. 24
def do_analysis(single_precision=False):
    fn = sys.argv[1]

    ds = yt.load(fn)
    ad = ds.all_data()
    ad0 = ds.covering_grid(level=0,
                           left_edge=ds.domain_left_edge,
                           dims=ds.domain_dimensions)

    opmd = io.Series('diags/openpmd/openpmd_%T.h5', io.Access.read_only)
    opmd_i = opmd.iterations[200]

    #--------------------------------------------------------------------------------------------------
    # Part 1: get results from plotfiles (label '_yt')
    #--------------------------------------------------------------------------------------------------

    # Quantities computed from plotfiles
    values_yt = dict()

    domain_size = ds.domain_right_edge.value - ds.domain_left_edge.value
    dx = domain_size / ds.domain_dimensions

    # Electrons
    x = ad['electrons', 'particle_position_x'].to_ndarray()
    y = ad['electrons', 'particle_position_y'].to_ndarray()
    z = ad['electrons', 'particle_position_z'].to_ndarray()
    uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c
    w = ad['electrons', 'particle_weight'].to_ndarray()
    filt = uz < 0

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)
    uzavg_filt = np.zeros(ds.domain_dimensions)
    wavg_filt = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]
        uzavg_filt[x_ind[i_p], y_ind[i_p],
                   z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
        wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
    values_yt['electrons: zavg'] = zavg / wavg_adj
    values_yt['electrons: uzavg'] = uzavg / wavg_adj
    values_yt['electrons: zuzavg'] = zuzavg / wavg_adj
    values_yt['electrons: uzavg_filt'] = uzavg_filt / wavg_filt_adj

    # protons
    x = ad['protons', 'particle_position_x'].to_ndarray()
    y = ad['protons', 'particle_position_y'].to_ndarray()
    z = ad['protons', 'particle_position_z'].to_ndarray()
    uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c
    w = ad['protons', 'particle_weight'].to_ndarray()
    filt = uz < 0

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)
    uzavg_filt = np.zeros(ds.domain_dimensions)
    wavg_filt = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]
        uzavg_filt[x_ind[i_p], y_ind[i_p],
                   z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
        wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
    values_yt['protons: zavg'] = zavg / wavg_adj
    values_yt['protons: uzavg'] = uzavg / wavg_adj
    values_yt['protons: zuzavg'] = zuzavg / wavg_adj
    values_yt['protons: uzavg_filt'] = uzavg_filt / wavg_filt_adj

    # Photons (momentum in units of m_e c)
    x = ad['photons', 'particle_position_x'].to_ndarray()
    y = ad['photons', 'particle_position_y'].to_ndarray()
    z = ad['photons', 'particle_position_z'].to_ndarray()
    uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c
    w = ad['photons', 'particle_weight'].to_ndarray()
    filt = uz < 0

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)
    uzavg_filt = np.zeros(ds.domain_dimensions)
    wavg_filt = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]
        uzavg_filt[x_ind[i_p], y_ind[i_p],
                   z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
        wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
    values_yt['photons: zavg'] = zavg / wavg_adj
    values_yt['photons: uzavg'] = uzavg / wavg_adj
    values_yt['photons: zuzavg'] = zuzavg / wavg_adj
    values_yt['photons: uzavg_filt'] = uzavg_filt / wavg_filt_adj

    #--------------------------------------------------------------------------------------------------
    # Part 2: get results from reduced particle diagnostics (label '_rd')
    #--------------------------------------------------------------------------------------------------

    values_rd = dict()
    # Load reduced particle diagnostic data from plotfiles
    values_rd['electrons: zavg'] = ad0[('boxlib', 'z_electrons')]
    values_rd['protons: zavg'] = ad0[('boxlib', 'z_protons')]
    values_rd['photons: zavg'] = ad0[('boxlib', 'z_photons')]

    values_rd['electrons: uzavg'] = ad0[('boxlib', 'uz_electrons')]
    values_rd['protons: uzavg'] = ad0[('boxlib', 'uz_protons')]
    values_rd['photons: uzavg'] = ad0[('boxlib', 'uz_photons')]

    values_rd['electrons: zuzavg'] = ad0[('boxlib', 'zuz_electrons')]
    values_rd['protons: zuzavg'] = ad0[('boxlib', 'zuz_protons')]
    values_rd['photons: zuzavg'] = ad0[('boxlib', 'zuz_photons')]

    values_rd['electrons: uzavg_filt'] = ad0[('boxlib', 'uz_filt_electrons')]
    values_rd['protons: uzavg_filt'] = ad0[('boxlib', 'uz_filt_protons')]
    values_rd['photons: uzavg_filt'] = ad0[('boxlib', 'uz_filt_photons')]

    values_opmd = dict()
    # Load reduced particle diagnostic data from OPMD output
    values_opmd['electrons: zavg'] = opmd_i.meshes['z_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: zavg'] = opmd_i.meshes['z_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: zavg'] = opmd_i.meshes['z_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()

    values_opmd['electrons: uzavg'] = opmd_i.meshes['uz_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: uzavg'] = opmd_i.meshes['uz_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: uzavg'] = opmd_i.meshes['uz_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()

    values_opmd['electrons: zuzavg'] = opmd_i.meshes['zuz_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: zuzavg'] = opmd_i.meshes['zuz_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: zuzavg'] = opmd_i.meshes['zuz_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()

    values_opmd['electrons: uzavg_filt'] = opmd_i.meshes['uz_filt_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: uzavg_filt'] = opmd_i.meshes['uz_filt_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: uzavg_filt'] = opmd_i.meshes['uz_filt_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    opmd.flush()
    del opmd

    #--------------------------------------------------------------------------------------------------
    # Part 3: compare values from plotfiles and diagnostics and print output
    #--------------------------------------------------------------------------------------------------

    error_plt = dict()
    error_opmd = dict()
    tolerance = 5e-3 if single_precision else 1e-12
    # if single precision, increase tolerance from default value
    check_tolerance = 5e-3 if single_precision else 1e-9

    for k in values_yt.keys():
        # check that the zeros line up, since we'll be ignoring them in the error calculation
        assert (np.all((values_yt[k] == 0) == (values_rd[k] == 0)))
        error_plt[k] = np.max(
            abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] /
            abs(values_yt[k])[values_yt[k] != 0])
        print(k, 'relative error plotfile = ', error_plt[k])
        assert (error_plt[k] < tolerance)
        assert (np.all((values_yt[k] == 0) == (values_opmd[k].T == 0)))
        error_opmd[k] = np.max(
            abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] /
            abs(values_yt[k])[values_yt[k] != 0])
        print(k, 'relative error openPMD = ', error_opmd[k])
        assert (error_opmd[k] < tolerance)

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance)