Example #1
    def __init__(self, test_platforms, gas, specs, reacs):
        # note: test_size, script_dir, build_dir, obj_dir and lib_dir are
        # assumed to be defined at module level in the enclosing test module
        self.test_platforms = test_platforms
        self.gas = gas
        self.specs = specs
        self.reacs = reacs
        self.test_size = test_size
        self.script_dir = script_dir
        self.build_dir = build_dir
        self.obj_dir = obj_dir
        self.lib_dir = lib_dir

        # clean out build dir
        utils.create_dir(build_dir)
        for f in os.listdir(build_dir):
            if os.path.isfile(os.path.join(build_dir, f)):
                os.remove(os.path.join(build_dir, f))

        # info
        self.nspec = self.gas.n_species
        self.nrxn = self.gas.n_reactions
        # Ns - 1 + Temperature + Extra Variable
        self.jac_dim = self.gas.n_species - 1 + 2

        # create states
        np.random.seed(0)
        self.T = np.random.uniform(600, 2200, size=test_size)
        self.P = np.random.uniform(0.5, 50, size=test_size) * ct.one_atm
        self.V = np.random.uniform(1e-3, 1, size=test_size)
        self.Y = np.random.uniform(0, 1, size=(test_size, self.gas.n_species))
        # randomly set some zeros for each species
        self.Y[np.random.choice(self.Y.shape[0], size=gas.n_species),
               np.arange(gas.n_species)] = 0
        self.concs = np.empty_like(self.Y)
        self.n = np.empty((test_size, self.gas.n_species - 1))
        self.fwd_rate_constants = np.zeros((test_size, self.gas.n_reactions))
        self.fwd_rxn_rate = np.zeros((test_size, self.gas.n_reactions))

        self.spec_u = np.zeros((test_size, gas.n_species))
        self.spec_h = np.zeros((test_size, gas.n_species))
        self.spec_cv = np.zeros((test_size, gas.n_species))
        self.spec_cp = np.zeros((test_size, gas.n_species))
        self.spec_b = np.zeros((test_size, gas.n_species))
        self.conp_temperature_rates = np.zeros(test_size)
        self.conv_temperature_rates = np.zeros(test_size)

        # third body indices
        self.thd_inds = np.array([
            i for i, x in enumerate(gas.reactions())
            if isinstance(x, ct.FalloffReaction)
            or isinstance(x, ct.ChemicallyActivatedReaction)
            or isinstance(x, ct.ThreeBodyReaction)
        ])
        self.ref_thd = np.zeros((test_size, self.thd_inds.size))
        self.ref_pres_mod = np.zeros((test_size, self.thd_inds.size))
        self.species_rates = np.zeros((test_size, gas.n_species))
        self.rxn_rates = np.zeros((test_size, gas.n_reactions))
        thd_eff_maps = []
        for i in self.thd_inds:
            default = gas.reaction(i).default_efficiency
            # fill all missing with default
            thd_eff_map = [
                default if j not in gas.reaction(i).efficiencies else
                gas.reaction(i).efficiencies[j] for j in gas.species_names
            ]
            thd_eff_maps.append(np.array(thd_eff_map))
        thd_eff_maps = np.array(thd_eff_maps)

        # various indices and mappings
        self.rev_inds = np.array(
            [i for i in range(gas.n_reactions) if gas.is_reversible(i)],
            dtype=np.int32)
        self.rev_rate_constants = np.zeros((test_size, self.rev_inds.size))
        self.rev_rxn_rate = np.zeros((test_size, self.rev_inds.size))
        self.equilibrium_constants = np.zeros((test_size, self.rev_inds.size))

        self.fall_inds = np.array([
            i for i, x in enumerate(gas.reactions())
            if isinstance(x, ct.FalloffReaction)
        ])
        self.sri_inds = np.array([
            i for i, x in enumerate(gas.reactions())
            if i in self.fall_inds and isinstance(x.falloff, ct.SriFalloff)
        ])
        self.troe_inds = np.array([
            i for i, x in enumerate(gas.reactions())
            if i in self.fall_inds and isinstance(x.falloff, ct.TroeFalloff)
        ])
        self.lind_inds = np.array([
            i for i, x in enumerate(gas.reactions()) if i in self.fall_inds
            and not (i in self.troe_inds or i in self.sri_inds)
        ])
        self.troe_to_pr_map = np.array(
            [np.where(self.fall_inds == j)[0][0] for j in self.troe_inds])
        self.sri_to_pr_map = np.array(
            [np.where(self.fall_inds == j)[0][0] for j in self.sri_inds])
        self.lind_to_pr_map = np.array(
            [np.where(self.fall_inds == j)[0][0] for j in self.lind_inds])
        self.ref_Pr = np.zeros((test_size, self.fall_inds.size))
        self.ref_Sri = np.zeros((test_size, self.sri_inds.size))
        self.ref_Troe = np.zeros((test_size, self.troe_inds.size))
        self.ref_Lind = np.ones((test_size, self.lind_inds.size))
        self.ref_Fall = np.ones((test_size, self.fall_inds.size))
        self.ref_B_rev = np.zeros((test_size, gas.n_species))
        # and the corresponding reactions
        fall_reacs = [gas.reaction(j) for j in self.fall_inds]
        sri_reacs = [gas.reaction(j) for j in self.sri_inds]
        troe_reacs = [gas.reaction(j) for j in self.troe_inds]

        # convenience method for the low- to high-pressure rate constant
        # ratio (k0 / kinf); the reduced pressure Pr is formed later by
        # multiplying in the third-body concentration
        arrhen_temp = np.zeros(self.fall_inds.size)

        def pr_eval(i, j):
            reac = fall_reacs[j]
            return reac.low_rate(self.T[i]) / reac.high_rate(self.T[i])

        thd_to_fall_map = np.where(np.in1d(self.thd_inds, self.fall_inds))[0]

        for i in range(test_size):
            self.gas.TPY = self.T[i], self.P[i], self.Y[i, :]
            self.concs[i, :] = self.gas.concentrations[:]

            # set moles
            self.n[i, :] = self.concs[i, :-1] * self.V[i]

            # ensure that n_ns is non-negative
            n_ns = self.P[i] * self.V[i] / (ct.gas_constant * self.T[i]) \
                - np.sum(self.n[i, :])
            assert n_ns >= 0 or np.isclose(n_ns, 0)

            # store various information
            self.fwd_rate_constants[i, :] = gas.forward_rate_constants[:]
            self.rev_rate_constants[i, :] = gas.reverse_rate_constants[
                self.rev_inds]
            self.equilibrium_constants[i, :] = gas.equilibrium_constants[
                self.rev_inds]
            self.fwd_rxn_rate[i, :] = gas.forward_rates_of_progress[:]
            self.rev_rxn_rate[i, :] = gas.reverse_rates_of_progress[
                self.rev_inds]
            self.rxn_rates[i, :] = gas.net_rates_of_progress[:]
            self.species_rates[i, :] = gas.net_production_rates[:]
            self.ref_thd[i, :] = np.dot(thd_eff_maps, self.concs[i, :])
            # species thermo props
            for j in range(gas.n_species):
                cp = gas.species(j).thermo.cp(self.T[i])
                s = gas.species(j).thermo.s(self.T[i])
                h = gas.species(j).thermo.h(self.T[i])
                self.spec_cv[i, j] = cp - ct.gas_constant
                self.spec_cp[i, j] = cp
                self.spec_b[i, j] = s / ct.gas_constant - h / \
                    (ct.gas_constant * self.T[i]) - np.log(self.T[i])
                self.spec_u[i, j] = h - self.T[i] * ct.gas_constant
                self.spec_h[i, j] = h

            self.conp_temperature_rates[i] = (
                -np.dot(self.spec_h[i, :], self.species_rates[i, :]) /
                np.dot(self.spec_cp[i, :], self.concs[i, :]))
            self.conv_temperature_rates[i] = (
                -np.dot(self.spec_u[i, :], self.species_rates[i, :]) /
                np.dot(self.spec_cv[i, :], self.concs[i, :]))
            for j in range(self.fall_inds.size):
                arrhen_temp[j] = pr_eval(i, j)
            self.ref_Pr[i, :] = self.ref_thd[i, thd_to_fall_map] * arrhen_temp
            for j in range(self.sri_inds.size):
                self.ref_Sri[i, j] = sri_reacs[j].falloff(
                    self.T[i], self.ref_Pr[i, self.sri_to_pr_map[j]])
            for j in range(self.troe_inds.size):
                self.ref_Troe[i, j] = troe_reacs[j].falloff(
                    self.T[i], self.ref_Pr[i, self.troe_to_pr_map[j]])
            if self.sri_inds.size:
                self.ref_Fall[i, self.sri_to_pr_map] = self.ref_Sri[i, :]
            if self.troe_inds.size:
                self.ref_Fall[i, self.troe_to_pr_map] = self.ref_Troe[i, :]
            for j in range(gas.n_species):
                self.ref_B_rev[i, j] = gas.species(j).thermo.s(
                    self.T[i]) / ct.gas_constant - gas.species(j).thermo.h(
                        self.T[i]) / (ct.gas_constant * self.T[i]) - np.log(
                            self.T[i])

        # set phi
        self.phi_cp = np.concatenate(
            (self.T.reshape(-1, 1), self.V.reshape(-1, 1), self.n), axis=1)
        self.phi_cv = np.concatenate(
            (self.T.reshape(-1, 1), self.P.reshape(-1, 1), self.n), axis=1)

        # get extra variable rates
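        # mws below holds the weighting (1 - W_k / W_Ns) used to eliminate the
        # last species; V_dot and P_dot then follow from the ideal gas law
        # combined with the constant-pressure / constant-volume temperature
        # rates computed above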
        mws = self.gas.molecular_weights
        mws = (1 - mws[:-1] / mws[-1])
        self.V_dot = self.V * (self.T * ct.gas_constant / self.P *
                               np.dot(self.species_rates[:, :-1], mws) +
                               self.conp_temperature_rates / self.T)

        self.P_dot = (self.T * ct.gas_constant *
                      np.dot(self.species_rates[:, :-1], mws) +
                      self.conv_temperature_rates * self.P / self.T)

        self.dphi_cp = np.concatenate((self.conp_temperature_rates.reshape(
            -1, 1), self.V_dot.reshape(
                -1, 1), self.species_rates[:, :-1] * self.V[:, np.newaxis]),
                                      axis=1)
        self.dphi_cv = np.concatenate((self.conv_temperature_rates.reshape(
            -1, 1), self.P_dot.reshape(
                -1, 1), self.species_rates[:, :-1] * self.V[:, np.newaxis]),
                                      axis=1)

        # the pressure mod terms depend on the reaction type
        # for pure third bodies, it's just the third body conc:
        self.ref_pres_mod[:, :] = self.ref_thd[:, :]
        # now find the Pr poly
        Pr_poly = 1. / (1. + self.ref_Pr)
        # and multiply the falloff (i.e. non-chem activated terms)
        pure_fall_inds = np.array([
            i for i, x in enumerate(gas.reactions())
            if isinstance(x, ct.FalloffReaction)
            and not isinstance(x, ct.ChemicallyActivatedReaction)
        ])
        pure_fall_inds = np.where(np.in1d(self.fall_inds, pure_fall_inds))[0]
        Pr_poly[:, pure_fall_inds] *= self.ref_Pr[:, pure_fall_inds]
        # finally find the product of the Pr poly and the falloff blending term
        Fall_pres_mod = Pr_poly * self.ref_Fall
        # and replace in the pressure mod
        replace_inds = np.where(np.in1d(self.thd_inds, self.fall_inds))[0]
        self.ref_pres_mod[:, replace_inds] = Fall_pres_mod[:, :]
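
For orientation, the sketch below strips the constructor above down to its core pattern: draw random (T, P, Y) states, push them through Cantera one state at a time, and record the reference quantities. It is a minimal standalone illustration rather than part of the original class; the 'gri30.yaml' mechanism and the small test_size are illustrative assumptions.

import cantera as ct
import numpy as np

gas = ct.Solution('gri30.yaml')  # illustrative mechanism
test_size = 4

np.random.seed(0)
T = np.random.uniform(600, 2200, size=test_size)
P = np.random.uniform(0.5, 50, size=test_size) * ct.one_atm
Y = np.random.uniform(0, 1, size=(test_size, gas.n_species))

species_rates = np.zeros((test_size, gas.n_species))
fwd_rate_constants = np.zeros((test_size, gas.n_reactions))
for i in range(test_size):
    gas.TPY = T[i], P[i], Y[i, :]  # Cantera normalizes the mass fractions
    species_rates[i, :] = gas.net_production_rates
    fwd_rate_constants[i, :] = gas.forward_rate_constants

print(species_rates.shape, fwd_rate_constants.shape)
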
Example #2
def run(gas, interval, num_states, work_dir, repeats=10):
    def arrhenius(T, A, b, Ea, print_stats=False):
        vals = A * np.power(T, b) * np.exp(-Ea / (ct.gas_constant * T))
        if print_stats:
            print(A, b, Ea, np.max(np.abs(vals - kf) / kf))
        return vals

    # first, convert all reactions to a single rate form
    reacs = gas.reactions()[:]
    for i, reac in enumerate(reacs):
        if isinstance(reac, ct.ChemicallyActivatedReaction):
            reacs[i] = ct.ElementaryReaction(reac.reactants, reac.products)
            reacs[i].rate = reac.high_rate
        elif isinstance(reac, ct.FalloffReaction):
            reacs[i] = ct.ElementaryReaction(reac.reactants, reac.products)
            reacs[i].rate = reac.high_rate
        elif isinstance(reac, ct.ThreeBodyReaction):
            reacs[i] = ct.ElementaryReaction(reac.reactants, reac.products)
            reacs[i].rate = reac.rate
        elif isinstance(reac, ct.PlogReaction):
            reacs[i] = ct.ElementaryReaction(reac.reactants, reac.products)
            reacs[i].rate = reac.rates[-1][1]
        elif isinstance(reac, ct.ChebyshevReaction):
            # fit _something_ to it at 10 bar, for 600-2200 K
            T = np.linspace(600, 2200, num=1000)
            kf = np.zeros_like(T)
            for j in range(T.size):
                kf[j] = reac(T[j], 10 * 1e5)  # 10 bar, in Pa
            A = 1e10
            b = 1
            Ea = 1200 * ct.gas_constant
            (A, b, Ea), _ = curve_fit(
                arrhenius, T, kf,
                p0=[A, b, Ea],
                bounds=[(0, -2, 0), (np.inf, 5, np.inf)],
                maxfev=1e6,
                xtol=1e-15)
            if False:
                plt.plot(T, kf, color='b', linestyle='-', label='cheb')
                plt.plot(T, arrhenius(T, A, b, Ea), color='r', linestyle='--',
                         label='fit')
                plt.legend(loc=0)
                plt.show()
            reacs[i] = ct.ElementaryReaction(reac.reactants, reac.products)
            reacs[i].rate = ct.Arrhenius(A, b, Ea)
        # set all to reversible
        reacs[i].reversible = True

    # and convert gas
    gas = ct.Solution(thermo='IdealGas', kinetics='GasKinetics',
                      reactions=reacs,
                      species=gas.species())

    # next, order the reactions by the number of distinct species
    def get_reac_and_spec_maps(mygas):
        # first, create rxn->species maps
        reac_nu = mygas.reactant_stoich_coeffs()
        prod_nu = mygas.product_stoich_coeffs()
        # species -> rxn mappings
        spec_masks = np.zeros((mygas.n_species, mygas.n_reactions),
                              dtype=bool)
        # the reaction -> species mappings
        reac_masks = []
        for i, reac in enumerate(mygas.reactions()):
            for spec in set(list(reac.reactants.keys()) + list(
                                 reac.products.keys())):
                j = mygas.species_index(spec)
                if prod_nu[j, i] - reac_nu[j, i]:
                    # non-zero species
                    spec_masks[j, i] = True
            reac_masks.append(np.where(spec_masks[:, i])[0])

        # convert to flat, fixed size array
        masks = list(reac_masks)
        max_size = np.max([x.size for x in masks])
        reac_masks = np.full((len(masks), max_size), -1)
        for i, mask in enumerate(masks):
            reac_masks[i, :mask.size] = mask[:]

        return spec_masks, reac_masks

    # ensure we didn't remove any species
    spec_masks, reac_masks = get_reac_and_spec_maps(gas)
    converted_spec_count = np.where(~np.sum(spec_masks, axis=1))[0].size
    assert converted_spec_count == gas.n_species

    def species_to_rxn_count(reac_list=slice(None)):
        """
        The number of reactions each species is in
        """
        return np.sum(spec_masks[:, reac_list], axis=1)

    def simple(specs, spec_counts):
        """
        Returns 0 if any species in the reaction is unique to that reaction,
        otherwise the mean number of reactions per species in the reaction
        """
        specs = specs[np.where(specs >= 0)]
        if np.any(spec_counts[specs] == 1):
            return 0
        return np.mean(spec_counts[specs])

    def minh(specs, spec_counts):
        """
        The minimum number of reactions any species in the reaction is in
        """

        specs = specs[np.where(specs >= 0)]
        minh = np.min(spec_counts[specs])
        return 0 if minh == 1 else minh

    def rxn_scores(heuristic, reac_list=slice(None)):
        """
        Returns a score from 0--1, where 1 indicates that the reaction is a
        good removal candidate and 0 a bad candidate

        Heuristics correspond to local methods
        """

        reac_list = np.arange(gas.n_reactions)[reac_list]
        s2r = species_to_rxn_count(reac_list)
        scores = np.apply_along_axis(heuristic, 1, reac_masks[reac_list], s2r)
        return scores

    def get_next_removed(heuristic, reac_list=slice(None)):
        """
        Get the index of the next reaction to remove from the current reac_list
        using the given heuristic
        """
        scores = rxn_scores(heuristic, reac_list)
        amax = np.argmax(scores)
        if scores[amax] == 0:
            return -1
        return reac_list[amax]

    def active_reactions(reac_list, return_active=False):
        """
        Returns the number of active reactions in the reac_list
        If return_active is True, return the list of active reactions
        """
        alist = np.where(reac_list >= 0)[0]
        if return_active:
            return alist
        return alist.size

    saved_reaction_lists = []
    reac_list = np.arange(gas.n_reactions)
    saved_reaction_lists.append(active_reactions(reac_list, True))
    while True:
        remove_at = get_next_removed(minh, reac_list=np.where(
            reac_list >= 0)[0])
        if remove_at == -1:
            break
        reac_list[remove_at] = -1
        if (active_reactions(reac_list) <= active_reactions(
                saved_reaction_lists[-1]) - interval):
            # save list
            saved_reaction_lists.append(active_reactions(reac_list, True))

    # get final reaction list and save
    reac_list = active_reactions(reac_list, True)
    saved_reaction_lists.append(reac_list)

    def gas_from_reac_list(reac_list):
        reacs = gas.reactions()[:]
        reacs = [reacs[i] for i in reac_list]
        return ct.Solution(thermo='IdealGas', kinetics='GasKinetics',
                           reactions=reacs,
                           species=gas.species())

    # remap, and check that the species count of the final mechanism matches
    # that of the original
    newgas = gas_from_reac_list(reac_list)
    smap_final, _ = get_reac_and_spec_maps(newgas)
    scount_final = np.where(~np.sum(smap_final, axis=1))[0].size
    assert scount_final == converted_spec_count

    print('Final mechanism size: {} reactions, {} species'.format(
        newgas.n_reactions, scount_final))

    vecsize = 8
    platform = 'intel'
    split_rate_kernels = False
    lang = 'opencl'
    rate_spec = 'hybrid'
    num_cores = 1
    order = 'C'

    def get_filename(wide=False):
        """
        emulate pyjac's naming scheme
        """
        desc = 'spec'
        vsize = vecsize if wide else '1'
        vectype = 'w' if wide else 'par'
        platform = 'intel'
        split = 'split' if split_rate_kernels else 'single'
        conp = 'conp'

        return '{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                desc, lang, vsize, order,
                vectype, platform, rate_spec,
                split, num_cores, conp) + '.txt'
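    # e.g., with the settings above, get_filename(wide=True) yields
    # 'spec_opencl_8_C_w_intel_hybrid_single_1_conp.txt', and
    # get_filename(wide=False) yields
    # 'spec_opencl_1_C_par_intel_hybrid_single_1_conp.txt'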

    def check_file(file):
        if not os.path.exists(file):
            return repeats
        with open(file, 'r') as f:
            lines = f.readlines()
        import re
        todo = repeats
        for line in lines:
            line = line.split(',')
            if len(line) > 1 and sum(
                    1 if re.search(r'(?:\d+(?:\.\d+e[+-]\d+))', l) else 0
                    for l in line) == 4:
                # four doubles -> good line
                todo -= 1
        return todo

    # note: `path` is assumed to be defined at module level (e.g., the
    # script's output directory)
    build = os.path.join(path, 'out')
    obj = os.path.join(path, 'obj')
    lib = os.path.join(path, 'lib')

    for wide in [True, False]:
        vsize = vecsize if wide else None
        # now, go through the various generated reaction lists and run
        # the test on each
        for reac_list in saved_reaction_lists:
            outname = get_filename(wide)
            todo = check_file(outname)
            # clean
            clean_dir(build, remove_dir=False)
            clean_dir(obj, remove_dir=False)
            clean_dir(lib, remove_dir=False)

            subdir = os.path.join(work_dir, str(active_reactions(reac_list)))
            create_dir(subdir)
            # generate the source rate evaluation
            rgas = gas_from_reac_list(reac_list)
            create_jacobian('opencl', gas=rgas, vector_size=vsize,
                            wide=wide, build_path=build,
                            rate_specialization=rate_spec,
                            split_rate_kernels=split_rate_kernels,
                            data_filename=os.path.abspath(
                                os.path.join(work_dir, 'data.bin')),
                            data_order=order,
                            platform=platform,
                            skip_jac=True)

            # first create the executable (via libgen)
            tester = generate_library(lang, build, obj_dir=obj, out_dir=lib,
                                      shared=True,
                                      btype=build_type.species_rates,
                                      as_executable=True)

            # and do runs
            with open(os.path.join(subdir, outname), 'a+') as file:
                for i in range(todo):
                    print(i, "/", todo)
                    subprocess.check_call([os.path.join(lib, tester),
                                           str(num_states), str(1)],
                                          stdout=file)
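
As a standalone illustration of the re-fitting step above (the Chebyshev branch), the sketch below recovers Arrhenius parameters from synthetic rate-constant data with scipy.optimize.curve_fit; the synthetic parameter values and the hard-coded gas constant are illustrative assumptions (the original uses ct.gas_constant).

import numpy as np
from scipy.optimize import curve_fit

R = 8314.462  # J / (kmol K), approximately ct.gas_constant


def arrhenius(T, A, b, Ea):
    return A * np.power(T, b) * np.exp(-Ea / (R * T))


# synthetic "truth" to fit against
T = np.linspace(600, 2200, num=1000)
kf = arrhenius(T, 2.0e10, 0.8, 1.0e7)

(A, b, Ea), _ = curve_fit(arrhenius, T, kf,
                          p0=[1e10, 1, 1200 * R],
                          bounds=[(0, -2, 0), (np.inf, 5, np.inf)],
                          xtol=1e-15)
print(A, b, Ea, np.max(np.abs(arrhenius(T, A, b, Ea) - kf) / kf))
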
Example #3
def test_strided_copy(state):
    lang = state['lang']
    order = state['order']
    depth = state['depth']
    width = state['width']

    # cleanup (build_dir, obj_dir, lib_dir, script_dir, etc. are assumed to be
    # provided by the enclosing test module; imports are not shown here)
    clean_dir(build_dir)
    clean_dir(obj_dir)
    clean_dir(lib_dir)

    # create
    utils.create_dir(build_dir)
    utils.create_dir(obj_dir)
    utils.create_dir(lib_dir)

    vec_size = depth if depth else (width if width else 0)
    # set max per run such that we will have a non-full (partial) final run;
    # this should also be evenly divisible by depth and width
    # (as should the non-full run)
    max_per_run = 16
    # the number of ics should be divisible by depth and width
    ics = max_per_run * 8 + vec_size
    if vec_size:
        assert ics % vec_size == 0
        assert max_per_run % vec_size == 0
        assert int(np.floor(ics / max_per_run) * max_per_run) % vec_size == 0
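    # e.g., for depth=4 (vec_size=4): ics = 16 * 8 + 4 = 132, giving eight
    # full runs of 16 states plus one partial run of 4; 132, 16 and 128 are
    # all divisible by 4, satisfying the asserts above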
    dtype = np.dtype('float64')

    # create test arrays
    def __create(shape):
        if not isinstance(shape, tuple):
            shape = (shape, )
        shape = (ics, ) + shape
        arr = np.zeros(shape, dtype=dtype, order=order)
        arr.flat[:] = np.arange(np.prod(shape))
        return arr

    arrays = [
        __create(16),
        __create(10),
        __create(20),
        __create((20, 20)),
        __create(())
    ]
    const = [np.arange(10, dtype=dtype)]
    lp_arrays = [lp.GlobalArg('a{}'.format(i),
                              shape=('problem_size',) + a.shape[1:],
                              order=order, dtype=a.dtype)
                 for i, a in enumerate(arrays)] + \
                [lp.TemporaryVariable('a{}'.format(i + len(arrays)),
                                      dtype=dtype, order=order,
                                      initializer=const[i], read_only=True,
                                      shape=const[i].shape)
                 for i in range(len(const))]
    const = lp_arrays[len(arrays):]

    dtype = 'double'

    # create array splitter
    opts = type('', (object, ), {
        'width': width,
        'depth': depth,
        'order': order,
        'lang': lang
    })
    asplit = array_splitter(opts)

    # split numpy
    arrays = asplit.split_numpy_arrays(arrays)
    # make dummy knl
    knl = lp.make_kernel(
        '{[i]: 0 <= i <= 1}', """
                            if i > 1
                                a0[i, i] = 0
                                a1[i, i] = 0
                                a2[i, i] = 0
                                a3[i, i, i] = 0
                                a4[i] = 0
                                <> k = a5[i]
                            end
                         """, lp_arrays)
    # split loopy
    lp_arrays = asplit.split_loopy_arrays(knl).args

    # now create a simple library
    mem = memory_manager(opts.lang,
                         opts.order,
                         asplit._have_split(),
                         dev_type=state['device_type'],
                         strided_c_copy=lang == 'c')
    mem.add_arrays([x for x in lp_arrays],
                   in_arrays=[x.name for x in lp_arrays if x not in const],
                   out_arrays=[x.name for x in lp_arrays if x not in const],
                   host_constants=const)

    # create "kernel"
    size_type = 'int'
    lang_headers = []
    if lang == 'opencl':
        lang_headers.extend([
            '#include "memcpy_2d.oclh"', '#include "vectorization.oclh"',
            '#include <CL/cl.h>', '#include "ocl_errorcheck.oclh"'
        ])
        size_type = 'cl_uint'
    elif lang == 'c':
        lang_headers.extend(
            ['#include "memcpy_2d.h"', '#include "error_check.h"'])

    # kernel must copy in and out, using the mem_manager's format
    knl = Template("""
    for (size_t offset = 0; offset < problem_size; offset += per_run)
    {
        ${type} this_run = problem_size - offset < per_run ? \
            problem_size - offset : per_run;
        /* Memory Transfers into the kernel, if any */
        ${mem_transfers_in}

        /* Memory Transfers out */
        ${mem_transfers_out}
    }
    """).safe_substitute(type=size_type,
                         mem_transfers_in=mem._mem_transfers(
                             to_device=True, host_postfix='_save'),
                         mem_transfers_out=mem.get_mem_transfers_out(),
                         problem_size=ics)

    # create the host memory allocations
    host_names = ['h_' + arr.name for arr in lp_arrays]
    host_allocs = mem.get_mem_allocs(True, host_postfix='')

    # device memory allocations
    device_allocs = mem.get_mem_allocs(False)

    # copy to save for test
    host_name_saves = ['h_' + a.name + '_save' for a in lp_arrays]
    host_const_allocs = mem.get_host_constants()
    host_copies = [
        Template("""
        ${type} ${save} [${size}] = {${vals}};
        memset(${host}, 0, ${size} * sizeof(${type}));
        """).safe_substitute(save='h_' + lp_arrays[i].name + '_save',
                             host='h_' + lp_arrays[i].name,
                             size=arrays[i].size,
                             vals=', '.join(
                                 [str(x) for x in arrays[i].flatten()]),
                             type=dtype) for i in range(len(arrays))
    ]

    # and finally checks
    check_template = Template("""
        for(int i = 0; i < ${size}; ++i)
        {
            assert(${host}[i] == ${save}[i]);
        }
    """)
    checks = [
        check_template.safe_substitute(host=host_names[i],
                                       save=host_name_saves[i],
                                       size=arrays[i].size)
        for i in range(len(arrays))
    ]

    # and preambles
    ocl_preamble = """
    double* temp_d;
    int* temp_i;
    // create a context / queue
    int lim = 10;
    cl_uint num_platforms;
    cl_uint num_devices;
    cl_platform_id platform [lim];
    cl_device_id device [lim];
    cl_int return_code;
    cl_context context;
    cl_command_queue queue;
    check_err(clGetPlatformIDs(lim, platform, &num_platforms));
    for (int i = 0; i < num_platforms; ++i)
    {
        check_err(clGetDeviceIDs(platform[i], CL_DEVICE_TYPE_ALL, lim, device,
            &num_devices));
        if(num_devices > 0)
            break;
    }
    context = clCreateContext(NULL, 1, &device[0], NULL, NULL, &return_code);
    check_err(return_code);

    //create queue
    queue = clCreateCommandQueue(context, device[0], 0, &return_code);
    check_err(return_code);
    """
    preamble = ''
    if lang == 'opencl':
        preamble = ocl_preamble

    end = ''
    if lang == 'opencl':
        end = """
        check_err(clFlush(queue));
        check_err(clReleaseCommandQueue(queue));
        check_err(clReleaseContext(context));
    """

    file_src = Template("""
${lang_headers}
#include <stdlib.h>
#include <string.h>
#include <assert.h>


int main()
{
    ${preamble}

    double zero [${max_dim}] = {0};

    ${size_type} problem_size = ${problem_size};
    ${size_type} per_run = ${max_per_run};

    ${host_allocs}
    ${host_const_allocs}
    ${mem_declares}
    ${device_allocs}

    ${mem_saves}

    ${host_constant_copy}

    ${knl}

    ${checks}

    ${end}

    exit(0);
}
    """).safe_substitute(lang_headers='\n'.join(lang_headers),
                         mem_declares=mem.get_defns(),
                         host_allocs=host_allocs,
                         host_const_allocs=host_const_allocs,
                         device_allocs=device_allocs,
                         mem_saves='\n'.join(host_copies),
                         host_constant_copy=mem.get_host_constants_in(),
                         checks='\n'.join(checks),
                         knl=knl,
                         preamble=preamble,
                         end=end,
                         size_type=size_type,
                         max_per_run=max_per_run,
                         problem_size=ics,
                         max_dim=max([x.size for x in arrays]))

    # write file
    fname = os.path.join(build_dir, 'test' + utils.file_ext[lang])
    with open(fname, 'w') as file:
        file.write(file_src)
    files = [fname]

    # write aux
    write_aux(build_dir, opts, [], [])

    # copy any deps
    def __copy_deps(lang,
                    scan_path,
                    out_path,
                    change_extension=True,
                    ffilt=None):
        deps = [
            x for x in os.listdir(scan_path)
            if os.path.isfile(os.path.join(scan_path, x))
            and not x.endswith('.in')
        ]
        if ffilt is not None:
            deps = [x for x in deps if ffilt in x]
        files = []
        for dep in deps:
            dep_dest = dep
            dep_is_header = dep.endswith(utils.header_ext[lang])
            ext = (utils.file_ext[lang]
                   if not dep_is_header else utils.header_ext[lang])
            if change_extension and not dep.endswith(ext):
                dep_dest = dep[:dep.rfind('.')] + ext
            shutil.copyfile(os.path.join(scan_path, dep),
                            os.path.join(out_path, dep_dest))
            if not dep_is_header:
                files.append(os.path.join(out_path, dep_dest))
        return files

    scan = os.path.join(script_dir, os.pardir, 'kernel_utils', lang)
    files += __copy_deps(lang, scan, build_dir)
    scan = os.path.join(script_dir, os.pardir, 'kernel_utils', 'common')
    files += __copy_deps(host_langs[lang],
                         scan,
                         build_dir,
                         change_extension=False,
                         ffilt='memcpy_2d')

    # build
    files = [
        file_struct(lang, lang, f[:f.rindex('.')], [build_dir], [], build_dir,
                    obj_dir, True, True) for f in files
    ]
    assert not any(compiler(x) for x in files)
    lib = libgen(lang, obj_dir, lib_dir, [x.filename for x in files], True,
                 False, True)
    lib = os.path.join(lib_dir, lib)
    # and run
    subprocess.check_call(lib)
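
The generated test source above drives all host/device memory transfers through a chunked loop; the plain-Python restatement below shows the same per-run arithmetic (the sizes mirror max_per_run = 16 and a vec_size of 4 from this test, chosen here purely for illustration).

# chunked execution loop, restated in Python
problem_size, per_run = 132, 16   # ics and max_per_run for vec_size = 4

runs = []
for offset in range(0, problem_size, per_run):
    this_run = min(per_run, problem_size - offset)
    runs.append(this_run)         # copy in, execute, copy out for this chunk

assert runs == [16] * 8 + [4]     # eight full runs and one partial run
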
Example #4
    def test_read_initial_conditions(self):
        build_dir = self.store.build_dir
        obj_dir = self.store.obj_dir
        lib_dir = self.store.lib_dir
        setup = test_utils.get_read_ics_source()
        utils.create_dir(build_dir)
        utils.create_dir(obj_dir)
        utils.create_dir(lib_dir)
        oploop = OptionLoop(
            OrderedDict([
                # no need to test conv
                ('conp', [True]),
                ('order', ['C', 'F']),
                ('depth', [4, None]),
                ('width', [4, None]),
                ('lang', ['c'])
            ]))
        for state in oploop:
            if state['depth'] and state['width']:
                continue
            self.__cleanup(False)
            # create dummy loopy opts
            opts = type('', (object, ), state)()
            asplit = array_splitter(opts)

            # get source
            path = os.path.realpath(
                os.path.join(self.store.script_dir, os.pardir, 'kernel_utils',
                             'common', 'read_initial_conditions.c.in'))

            with open(path, 'r') as file:
                ric = Template(file.read())
            # subs
            ric = ric.safe_substitute(mechanism='mechanism.h',
                                      vectorization='vectorization.h')
            # write
            with open(os.path.join(build_dir, 'read_initial_conditions.c'),
                      'w') as file:
                file.write(ric)
            # write header
            write_aux(build_dir, opts, self.store.specs, self.store.reacs)
            # write setup
            with open(os.path.join(build_dir, 'setup.py'), 'w') as file:
                file.write(setup.safe_substitute(buildpath=build_dir))
            # copy read ics header to final dest
            shutil.copyfile(
                os.path.join(self.store.script_dir, os.pardir, 'kernel_utils',
                             'common', 'read_initial_conditions.h'),
                os.path.join(build_dir, 'read_initial_conditions.h'))
            # copy wrapper
            shutil.copyfile(
                os.path.join(self.store.script_dir, 'test_utils',
                             'read_ic_wrapper.pyx'),
                os.path.join(build_dir, 'read_ic_wrapper.pyx'))
            # setup
            python_str = 'python{}.{}'.format(sys.version_info[0],
                                              sys.version_info[1])
            call = [
                python_str,
                os.path.join(build_dir, 'setup.py'), 'build_ext',
                '--build-lib', lib_dir
            ]
            subprocess.check_call(call)
            # copy in tester
            shutil.copyfile(
                os.path.join(self.store.script_dir, 'test_utils',
                             'ric_tester.py'),
                os.path.join(lib_dir, 'ric_tester.py'))

            # For simplicity (and really, lack of need) we test CONP only
            # hence, the extra variable is the volume, while the fixed parameter
            # is the pressure

            # save phi, param in correct order
            phi = (self.store.phi_cp if opts.conp else self.store.phi_cv)
            save_phi, = asplit.split_numpy_arrays(phi)
            save_phi = save_phi.flatten(opts.order)
            param = self.store.P if opts.conp else self.store.V
            save_phi.tofile(os.path.join(lib_dir, 'phi_test.npy'))
            param.tofile(os.path.join(lib_dir, 'param_test.npy'))

            # save bin file
            out_file = np.concatenate(
                (
                    np.reshape(phi[:, 0], (-1, 1)),  # temperature
                    np.reshape(param, (-1, 1)),      # param
                    phi[:, 1:]),                     # extra variable and moles
                axis=1
            )
            out_file = out_file.flatten('K')
            with open(os.path.join(lib_dir, 'data.bin'), 'wb') as file:
                out_file.tofile(file)

            # and run
            subprocess.check_call([
                python_str,
                os.path.join(lib_dir, 'ric_tester.py'), opts.order,
                str(self.store.test_size)
            ])
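
The binary file written above interleaves temperature, the fixed parameter, and the remaining state variables row by row; the standalone sketch below shows that layout and how it can be read back (the array sizes are illustrative, and note that numpy's tofile writes raw binary even when the filename ends in .npy).

import numpy as np

num_states, n_vars = 4, 5                     # illustrative sizes
phi = np.arange(num_states * n_vars,
                dtype=np.float64).reshape(num_states, n_vars)
param = np.full(num_states, 101325.0)         # fixed pressure for CONP

# row layout: [T, param, extra variable and moles...]
out = np.concatenate((phi[:, :1], param.reshape(-1, 1), phi[:, 1:]), axis=1)
out.flatten('K').tofile('data.bin')

back = np.fromfile('data.bin').reshape(num_states, -1)
assert np.allclose(back[:, 0], phi[:, 0])     # temperature round-trips
assert np.allclose(back[:, 1], param)         # parameter round-trips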