Example #1
        with open(os.path.join(sys.argv[-1], MAX_XS_FILE)) as j:
            max_xs = MaxSigma(json.load(j))

##################################### Calculate count info about each reaction #####################
IRRADIATION_DURATION = 3600  # s; the irradiation power is spread evenly over this duration
TRANSIT_DURATION = 86400 * 3
MEASUREMENT_DURATION = 3600 * 3
PRE_MEASUREMENT_POPULATION_FILE = "pre-measurement_population.json"
POST_MEASUREMENT_POPULATION_FILE = "post-measurement_population.json"

if __name__ == "__main__":
    from misc_library import BARN, MM_CM, get_apriori, decay_mat_exp_num_decays, decay_mat_exp_population_convolved, build_decay_chain_tree, linearize_decay_chain, EncoderOpenMC

    tprint("Stage 2: Calculating information about each reaction:")

    apriori_flux, apriori_fluence = get_apriori(sys.argv[-1],
                                                IRRADIATION_DURATION)

    # get the production rate for each reaction, and build that into a dataframe (which will be expanded upon)
    population = pd.DataFrame(
        {
            'production of primary product per reactant atom':
            (sigma_df.values * BARN) @ apriori_fluence
        },
        index=sigma_df.index)
    del sigma_df
    gc.collect(
    )  # sigma_df is no longer needed; free it to keep memory usage down.

    # create containers to contain the calculated activities
    # detected_counts_per_primary_product = {}
    population_pre, population_post = defaultdict(dict), defaultdict(
        dict)  # key = the name of the subchain
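
A note on the contraction above: population folds the group-wise microscopic cross-sections (in barns) into one production figure per reaction via sigma @ fluence. A minimal, self-contained sketch of the same contraction with invented numbers (the reaction names, group structure and fluence below are stand-ins, not the project's data):

import numpy as np
import pandas as pd

BARN = 1e-24  # cm^2 per barn; assumed to match misc_library.BARN
# two reactions x three energy groups, cross-sections in barns (made up)
sigma_df = pd.DataFrame([[1.2, 0.8, 0.1],
                         [0.0, 2.5, 0.4]],
                        index=["Fe56-Mn56-MT=103", "Al27-Na24-MT=107"])
apriori_fluence = np.array([1e14, 5e13, 1e13])  # neutrons cm^-2 in each group

# production of primary product per reactant atom = sum_g sigma_g * fluence_g
production = (sigma_df.values * BARN) @ apriori_fluence
print(pd.DataFrame(
    {"production of primary product per reactant atom": production},
    index=sigma_df.index))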
Example #2
    with open(os.path.join(sys.argv[-1], "decay_radiation.json")) as j:
        decay_dict = unserialize_dict(json.load(j))
    with open(os.path.join(sys.argv[-1], "decay_info.json")) as j:
        decay_info = unserialize_dict(
            json.load(j)
        )  # the decay counts given the stated starting and ending measurement times.
    with open(os.path.join(sys.argv[-1],
                           "post-measurement_population.json")) as j:
        post_measurement_population = json.load(j)
    abs_efficiency_curve = HPGe_efficiency_curve_generator()
    PARAMS_DICT = get_parameters_json(sys.argv[-1])
    IRRADIATION_DURATION = PARAMS_DICT["IRRADIATION_DURATION"]
    b = IRRADIATION_DURATION + PARAMS_DICT["TRANSIT_DURATION"]  # start of the gamma measurement
    c = b + PARAMS_DICT["MEASUREMENT_DURATION"]  # end of the gamma measurement
    APRIORI_FLUX, APRIORI_FLUENCE = get_apriori(sys.argv[-1],
                                                IRRADIATION_DURATION)
    sigma_df = pd.read_csv(os.path.join(sys.argv[-1], "microscopic_xs.csv"),
                           index_col=[0])
    gs = pd.read_csv(os.path.join(sys.argv[-1], 'gs.csv')).values

    def get_macro_and_num_reactions(parents_product_mts,
                                    foil_num_density_dict):
        """
        Calculate the macroscopic cross-section and partial reaction rate contribution variables.
        """
        macroscopic_xs = np.zeros(len(gs))
        for parent_product_mt in unpack_reactions(parents_product_mts):
            microscopic_xs = sigma_df[sigma_df.index ==
                                      parent_product_mt].values[0] * BARN
            macroscopic_xs += microscopic_xs * foil_num_density_dict[
                parent_product_mt.split("-")[0]]
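
For orientation, the loop above implements the standard mixture formula Sigma(E_g) = sum_i sigma_i(E_g) * N_i. A runnable sketch of the same sum (the nuclide names and number densities below are invented for illustration):

import numpy as np

BARN = 1e-24  # cm^2 per barn

def macroscopic_xs(micro_xs_barns, num_density_dict):
    # Sigma(E_g) = sum over nuclides of sigma_i(E_g) [cm^2] * N_i [atoms cm^-3], in cm^-1
    n_groups = len(next(iter(micro_xs_barns.values())))
    total = np.zeros(n_groups)
    for nuclide, sigma in micro_xs_barns.items():
        total += np.asarray(sigma, dtype=float) * BARN * num_density_dict[nuclide]
    return total

print(macroscopic_xs({"Fe56": [1.2, 0.8], "Fe57": [0.9, 0.3]},   # barns per group
                     {"Fe56": 7.8e22, "Fe57": 1.9e21}))          # atoms cm^-3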
Example #3
def run_():
    from .read_data import *

    def tprint(*msg):
        print("\n[t=+{:2.2f}s]".format(time.time() - prog_start_time), *msg)

    prog_start_time = time.time()

    stage1_outputs_exist = all(
        os.path.exists(os.path.join(sys.argv[-1], fname))
        for fname in (FULL_DECAY_INFO_FILE, CONDENSED_DECAY_INFO_FILE,
                      MICROSCOPIC_XS_CSV, MAX_XS_FILE))
    if not stage1_outputs_exist:
        from misc_library import EncoderOpenMC, MT_to_nuc_num, load_endf_directories, FISSION_MTS, AMBIGUOUS_MT

        prog_start_time = time.time()
        assert os.path.exists(os.path.join(
            sys.argv[-1],
            'gs.csv')), "Output directory must already have gs.csv"
        gs = pd.read_csv(os.path.join(sys.argv[-1], 'gs.csv')).values
        if SORT_BY_REACTION_RATE:
            _msg = f"Output directory must already have integrated_apriori.csv in order to sort the {MICROSCOPIC_XS_CSV} in descending order of expected-radionuclide-population later on."
            assert os.path.exists(
                os.path.join(sys.argv[-1], 'integrated_apriori.csv')), _msg
            apriori = pd.read_csv(
                os.path.join(sys.argv[-1],
                             'integrated_apriori.csv'))['value'].values
        endf_file_list = load_endf_directories(sys.argv[1:-1])
        print(f"Loaded {len(endf_file_list)} different material files,\n")

        # First compile the decay records
        tprint(
            "Stage 1: Compiling the decay information as decay_dict, and recording the excited-state to isomeric-state information:"
        )
        decay_dict = OrderedDict()  # dictionary of decay data
        isomeric_to_excited_state = OrderedDict(
        )  # a dictionary that translates isomeric-state names into excited-state names
        with warnings.catch_warnings(record=True) as w_list:
            for file in tqdm(endf_file_list):
                name = str(file.target["atomic_number"]).zfill(
                    3) + ATOMIC_SYMBOL[file.target["atomic_number"]] + str(
                        file.target["mass_number"])
                isomeric_name = name  # make a copy
                if file.target[
                        "isomeric_state"] > 0:  # not in the ground state: append the _m (isomeric) and _e (excited) suffixes
                    isomeric_name += "_m" + str(file.target["isomeric_state"])
                    name += "_e" + str(file.target["state"])
                isomeric_to_excited_state[isomeric_name] = name[
                    3:]  # strip the zero-padded atomic-number prefix from the excited-state name

                if file.info[
                        'sublibrary'] == "Radioactive decay data":  # applicable to materials with (mf, mt) = (8, 457) file section
                    dec_f = Decay.from_endf(file)
                    decay_dict[name] = extract_decay(dec_f)
        if w_list:
            print(w_list[0].filename + ", line {}, {}'s:".format(
                w_list[0].lineno, w_list[0].category.__name__))
            for w in w_list:
                print("    " + str(w.message))
        decay_dict = sort_and_trim_ordered_dict(
            decay_dict)  # reorder it so that the entries are sorted by nuclide
        isomeric_to_excited_state = sort_and_trim_ordered_dict(
            isomeric_to_excited_state)
        tprint(
            "Renaming the decay products from isomeric state names to excited state names:"
        )
        decay_dict = rename_branching_ratio(decay_dict,
                                            isomeric_to_excited_state)

        # Save said decay records
        with open(os.path.join(sys.argv[-1], FULL_DECAY_INFO_FILE), 'w') as j:
            tprint("Saving the decay spectra as {} ...".format(
                FULL_DECAY_INFO_FILE))
            json.dump(decay_dict, j, cls=EncoderOpenMC)

        # turn decay records into number of counts
        tprint("Condensing each decay spectrum...")
        for name, dec_file in tqdm(decay_dict.items()):
            condense_spectrum(dec_file)

        with open(os.path.join(sys.argv[-1], CONDENSED_DECAY_INFO_FILE),
                  'w') as j:
            tprint("Saving the condensed decay information as {} ...".format(
                CONDENSED_DECAY_INFO_FILE))
            json.dump(decay_dict, j, cls=EncoderOpenMC)

        # Then compile the Incident-neutron records
        tprint("Compiling the raw cross-section dictionary.")
        xs_dict = OrderedDict()
        for file in tqdm(endf_file_list):
            # mf10 = {}
            if file.info['sublibrary'] == "Incident-neutron data":
                inc_f = IncidentNeutron.from_endf(file)
                nuc_sort_name = str(inc_f.atomic_number).zfill(3) + inc_f.name

                # get the higher-energy range values of xs as well if available.
                mf10_mt5 = MF10(file.section.get(
                    (10, 5),
                    None))  # defaults to None if section (10, 5) doesn't exist
                for (izap, isomeric_state), xs in mf10_mt5.items():
                    atomic_number, mass_number = divmod(izap, 1000)
                    if atomic_number > 0 and mass_number > 0:  # ignore spurious products that carry no physical meaning
                        isomeric_name = ATOMIC_SYMBOL[atomic_number] + str(
                            mass_number)
                        if isomeric_state > 0:
                            isomeric_name += "_m" + str(isomeric_state)
                        e_name = isomeric_to_excited_state.get(
                            isomeric_name,
                            isomeric_name.split("_")[0]
                        )  # fall back to the ground-state name if this isomer has no corresponding excited-state name
                        long_name = nuc_sort_name + "-" + e_name + "-MT=5"
                        xs_dict[long_name] = xs

                # get the normal reactions, found in mf=3
                for mt, rx in inc_f.reactions.items():
                    if any([(mt in AMBIGUOUS_MT), (mt in FISSION_MTS),
                            (301 <= mt <= 459)]):
                        continue  # skip ambiguous MTs, fission MTs, and heating MTs; they give no useful information about the radionuclides produced

                    append_name_list, xs_list = extract_xs(inc_f.atomic_number,
                                                           inc_f.mass_number,
                                                           rx,
                                                           tabulated=True)
                    # add each product into the dictionary one by one.
                    for name, xs in zip(append_name_list, xs_list):
                        xs_dict[nuc_sort_name + '-' + name] = xs

        # memory management
        print(
            "Deleting endf_file_list since it is no longer used in this script, to reduce memory usage"
        )
        del endf_file_list
        gc.collect()
        # del decay_dict

        xs_dict = sort_and_trim_ordered_dict(xs_dict)

        tprint(
            "Collapsing the cross-section to the group structure specified by 'gs.csv' and then saving it as '{}' ..."
            .format(MICROSCOPIC_XS_CSV))
        sigma_df, max_xs = collapse_xs(xs_dict, gs)
        with open(os.path.join(sys.argv[-1], MAX_XS_FILE), "w") as j:
            json.dump(max_xs, j)
        del xs_dict
        gc.collect()

        if not SHOW_SEPARATE_MT_REACTION_RATES:
            sigma_df = merge_identical_parent_products(sigma_df)
            # TODO: make merge_identical_parent_products work with max_xs as well.

        if SORT_BY_REACTION_RATE:
            sigma_df = sigma_df.loc[ary(sigma_df.index)[np.argsort(
                sigma_df.values @ apriori)[::-1]]]
        print(
            f"Saving the cross-sections in the required group structure to file as '{MICROSCOPIC_XS_CSV}'..."
        )
        sigma_df.to_csv(os.path.join(sys.argv[-1], MICROSCOPIC_XS_CSV))
        # saves the number of radionuclide produced per (neutron cm^-2) of fluence flash-irradiated in that given bin.

        # save parameters at the end.
        PARAMS_DICT = dict(
            HPGe_eff_file=HPGe_eff_file,
            gamma_E=gamma_E,
            FISSION_MTS=FISSION_MTS,
            AMBIGUOUS_MT=AMBIGUOUS_MT,
            SORT_BY_REACTION_RATE=SORT_BY_REACTION_RATE,
            SHOW_SEPARATE_MT_REACTION_RATES=SHOW_SEPARATE_MT_REACTION_RATES,
            CONDENSED_DECAY_INFO_FILE=CONDENSED_DECAY_INFO_FILE,
            FULL_DECAY_INFO_FILE=FULL_DECAY_INFO_FILE,
            MAX_XS_FILE=MAX_XS_FILE)
        PARAMS_DICT.update({sys.argv[0] + " argv": sys.argv[1:]})
        save_parameters_as_json(sys.argv[-1], PARAMS_DICT)
        tprint("Stage 1: Data reading from", *sys.argv[1:], "complete!")
    else:
        tprint(
            f"Assuming that Stage 1 is complete. Reading {MICROSCOPIC_XS_CSV} as sigma_df and {CONDENSED_DECAY_INFO_FILE} as decay_dict..."
        )
        from misc_library import unserialize_dict
        sigma_df = pd.read_csv(os.path.join(sys.argv[-1], MICROSCOPIC_XS_CSV),
                               index_col=[0])
        with open(os.path.join(sys.argv[-1], CONDENSED_DECAY_INFO_FILE)) as j:
            decay_dict = json.load(j)
            decay_dict = unserialize_dict(decay_dict)
        with open(os.path.join(sys.argv[-1], MAX_XS_FILE)) as j:
            max_xs = MaxSigma(json.load(j))

    from misc_library import BARN, MM_CM, get_apriori, decay_mat_exp_num_decays, decay_mat_exp_population_convolved, build_decay_chain_tree, linearize_decay_chain, EncoderOpenMC

    tprint("Stage 2: Calculating information about each reaction:")

    apriori_flux, apriori_fluence = get_apriori(sys.argv[-1],
                                                IRRADIATION_DURATION)

    # get the production rate for each reaction, and build that into a dataframe (which will be expanded upon)
    population = pd.DataFrame(
        {
            'production of primary product per reactant atom':
            (sigma_df.values * BARN) @ apriori_fluence
        },
        index=sigma_df.index)
    del sigma_df
    gc.collect(
    )  # sigma_df is no longer needed; free it to keep memory usage down.

    # create containers to contain the calculated activities
    # detected_counts_per_primary_product = {}
    population_pre, population_post = defaultdict(dict), defaultdict(
        dict)  # key = the name of the subchain
    total_counts, total_counts_post_meas, activity_pre, activity_post, cnt_rate_pre, cnt_rate_post = {}, {}, {}, {}, {}, {}

    tprint(
        "Calculating the expected number of photopeak counts for each type of product created:"
    )
    product_set = set([
        parent_product_mt.split("-")[1]
        for parent_product_mt in population.index
    ])
    PRE_MEASUREMENT = IRRADIATION_DURATION + TRANSIT_DURATION
    POST_MEASUREMENT = IRRADIATION_DURATION + TRANSIT_DURATION + MEASUREMENT_DURATION
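    # Timeline on a single clock: irradiation occupies [0, IRRADIATION_DURATION];
    # the foil then cools in transit until PRE_MEASUREMENT, and the detector
    # counts over the window [PRE_MEASUREMENT, POST_MEASUREMENT].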

    for product in tqdm(product_set):
        detected_counts_per_primary_product = []
        for subchain in linearize_decay_chain(
                build_decay_chain_tree(product, decay_dict)):
            new_pathway = {
                "pathway":
                "-".join(subchain.names),
                "counts during measurement":
                decay_mat_exp_num_decays(
                    subchain.branching_ratios, subchain.decay_constants,
                    IRRADIATION_DURATION, PRE_MEASUREMENT, POST_MEASUREMENT) *
                subchain.countable_photons,
                "counts in 1 hr immediately after measurement":
                decay_mat_exp_num_decays(
                    subchain.branching_ratios, subchain.decay_constants,
                    IRRADIATION_DURATION, POST_MEASUREMENT,
                    POST_MEASUREMENT + 3600) * subchain.countable_photons
            }
            population_pre[product][
                new_pathway["pathway"]] = decay_mat_exp_population_convolved(
                    subchain.branching_ratios, subchain.decay_constants,
                    IRRADIATION_DURATION, PRE_MEASUREMENT)
            population_post[product][
                new_pathway["pathway"]] = decay_mat_exp_population_convolved(
                    subchain.branching_ratios, subchain.decay_constants,
                    IRRADIATION_DURATION, POST_MEASUREMENT)

            new_pathway[
                "decay rate pre-measurement"] = subchain.decay_constants[
                    -1] * population_pre[product][new_pathway["pathway"]]
            new_pathway[
                "decay rate post-measurement"] = subchain.decay_constants[
                    -1] * population_post[product][new_pathway["pathway"]]
            new_pathway[
                "count rate pre-measurement"] = subchain.countable_photons * new_pathway[
                    "decay rate pre-measurement"]
            new_pathway[
                "count rate post-measurement"] = subchain.countable_photons * new_pathway[
                    "decay rate post-measurement"]

            detected_counts_per_primary_product.append(new_pathway)

        total_counts[product] = sum([
            path["counts during measurement"]
            for path in detected_counts_per_primary_product
        ])
        total_counts_post_meas[product] = sum([
            path["counts in 1 hr immediately after measurement"]
            for path in detected_counts_per_primary_product
        ])
        activity_pre[product] = sum([
            path["decay rate pre-measurement"]
            for path in detected_counts_per_primary_product
        ])
        activity_post[product] = sum([
            path["decay rate post-measurement"]
            for path in detected_counts_per_primary_product
        ])
        cnt_rate_pre[product] = sum([
            path["count rate pre-measurement"]
            for path in detected_counts_per_primary_product
        ])
        cnt_rate_post[product] = sum([
            path["count rate post-measurement"]
            for path in detected_counts_per_primary_product
        ])

    # clean out reactions that can't be detected.
    tprint(
        "Removing all reactions whose product is the same as the reactant (i.e. elastic scattering reactions):"
    )
    population = population[ary([
        parent_product_mt.split("-")[0] != parent_product_mt.split("-")[1]
        for parent_product_mt in population.index
    ])]
    # add the total counts of gamma photons detectable per primary product column

    tprint("Matching the reactions to their decay product count...")
    # add the final counts measured by detector PPP column
    rearranged_total_cnts = ary([
        total_counts[parent_product_mt.split("-")[1]]
        for parent_product_mt in population.index
    ])
    population['final counts measured by detector PPP'] = rearranged_total_cnts

    # sort by activity and remove all nans
    tprint(
        "Re-ordering the dataframe by the final counts measured by detector PPP and removing the entries with zero counts..."
    )
    population.sort_values('final counts measured by detector PPP',
                           inplace=True,
                           ascending=False)
    population = population[population["final counts measured by detector PPP"]
                            > 0.0]  # keeping only those with positive counts.

    # save the population breakdown in total_counts
    tprint("Saving the population breakdowns as .json files...")
    all_significant_products = ordered_set([
        parent_product_mt.split("-")[1]
        for parent_product_mt in population.index
    ])
    with open(os.path.join(sys.argv[-1], PRE_MEASUREMENT_POPULATION_FILE),
              "w") as j:
        json.dump(
            {
                product: population_pre[product]
                for product in all_significant_products
            },
            j,
            cls=EncoderOpenMC)
        # del population_pre; gc.collect()
    with open(os.path.join(sys.argv[-1], POST_MEASUREMENT_POPULATION_FILE),
              "w") as j:
        json.dump(
            {
                product: population_post[product]
                for product in all_significant_products
            },
            j,
            cls=EncoderOpenMC)
        # del population_post; gc.collect()

    # add the rest of the information
    rearranged_post_cnt_meas = ary([
        total_counts_post_meas[parent_product_mt.split("-")[1]]
        for parent_product_mt in population.index
    ])
    rearranged_activity_pre = ary([
        activity_pre[parent_product_mt.split("-")[1]]
        for parent_product_mt in population.index
    ])
    rearranged_activity_post = ary([
        activity_post[parent_product_mt.split("-")[1]]
        for parent_product_mt in population.index
    ])
    rearranged_cnt_rate_pre = ary([
        cnt_rate_pre[parent_product_mt.split("-")[1]]
        for parent_product_mt in population.index
    ])
    rearranged_cnt_rate_post = ary([
        cnt_rate_post[parent_product_mt.split("-")[1]]
        for parent_product_mt in population.index
    ])
    rearranged_max_xs = ary(
        [max_xs[parent_product_mt] for parent_product_mt in population.index])
    population[
        "counts accumulated in the 1 hour following the detection period PPP"] = rearranged_post_cnt_meas  # column used to quickly extrapolate the post-measurement decay rate
    population["activity before measurement PPP"] = rearranged_activity_pre
    population["activity after measurement PPP"] = rearranged_activity_post
    population[
        "detector count rate before measurement PPP"] = rearranged_cnt_rate_pre
    population[
        "detector count rate after measurement PPP"] = rearranged_cnt_rate_post
    population["max microscopic cross-section"] = rearranged_max_xs

    tprint("Saving as 'counts.csv'...")
    try:
        population.to_csv(os.path.join(sys.argv[-1], 'counts.csv'),
                          index_label='rname')
    except ZeroDivisionError:
        tprint(
            "Minor issue when printing values that are too small. Please wait a couple more minutes..."
        )
        not_uncertain_columns = [
            "production of primary product per reactant atom",
            "max microscopic cross-section",
        ]
        for col in population.columns:
            if col not in not_uncertain_columns:
                # when expressing an uncertainties.core.Variable via its __str__ method, it tries to factorize the value.
                # But if the GCD between the nominal value and the uncertainty rounds down to 1E-323 or smaller, that leads to a ZeroDivisionError.
                floating_point_problem = population[
                    col] < 12.5E-324  # so we zero out all such small values.
                # This is harsher than it needs to be: it also zeroes items
                # with nominal value < 12.5E-324 but error > 12.5E-324,
                # even though those are perfectly expressible as strings.
                population.loc[floating_point_problem, col] = 0
        population.to_csv(os.path.join(sys.argv[-1], 'counts.csv'),
                          index_label='rname')
    save_parameters_as_json(
        sys.argv[-1],
        dict(
            IRRADIATION_DURATION=IRRADIATION_DURATION,
            TRANSIT_DURATION=TRANSIT_DURATION,
            MEASUREMENT_DURATION=MEASUREMENT_DURATION,
            # PRE_MEASUREMENT_POPULATION_FILE=PRE_MEASUREMENT_POPULATION_FILE,
            # POST_MEASUREMENT_POPULATION_FILE=POST_MEASUREMENT_POPULATION_FILE,
        ))
    tprint("Run complete. See results in 'counts.csv'.")
Example #4
def run():
    def tprint(*msg):
        print("\n[t=+{:2.2f}s]".format(time.time()-prog_start_time), *msg)

    from .get_reaction_rates import *
    # typical system/python stuff
    import sys, os, time, json
    from tqdm import tqdm

    # numerical packages
    from numpy import array as ary; import numpy as np
    from numpy import log as ln; from numpy import sqrt
    import pandas as pd

    # openmc/specialist packages
    import openmc
    import uncertainties
    from uncertainties.core import Variable
    #collections
    from collections import namedtuple, OrderedDict
    # foilselector
    from misc_library import save_parameters_as_json, unserialize_dict
    from misc_library import BARN, MM_CM, get_apriori, decay_mat_exp_num_decays, Bateman_convolved_generator

    # main script    
    prog_start_time = time.time()
    apriori_flux, apriori_fluence = get_apriori(sys.argv[-1], IRRADIATION_DURATION)

    with open(os.path.join(sys.argv[-1], CONDENSED_DECAY_INFO_FILE), 'r') as f:
        decay_dict = json.load(f)
        decay_dict = unserialize_dict(decay_dict)
    sigma_df = pd.read_csv(os.path.join(sys.argv[-1], 'response.csv'), index_col=[0])

    count_contribution_per_primary_product, total_counts_per_primary_product = {}, {}
    tprint("Calculating the expected number of photopeak counts for each type of product created:")
    product_set = set([parent_product_mt.split("-")[1] for parent_product_mt in sigma_df.index])
    for product in tqdm(product_set):
        count_contribution_per_primary_product[product] = [{
            'pathway': '-'.join(subchain.names),
            # (# of photons detected per nuclide n decayed) = (# of photons detected per decay of nuclide n) * lambda_n * \int(population)dT
            # 'counts': Bateman_num_decays_factorized(subchain.branching_ratios, subchain.decay_constants,
            'counts': np.prod(subchain.branching_ratios[1:]) *
                      decay_mat_exp_num_decays(subchain.decay_constants,
                                               IRRADIATION_DURATION,
                                               IRRADIATION_DURATION + TRANSIT_DURATION,
                                               IRRADIATION_DURATION + TRANSIT_DURATION + MEASUREMENT_DURATION
                                               ) * subchain.countable_photons,
        } for subchain in linearize_decay_chain(build_decay_chain(product, decay_dict))]
        total_counts_per_primary_product[product] = sum([path["counts"] for path in count_contribution_per_primary_product[product]])
    # get the production rate for each reaction
    population = pd.DataFrame({'production of primary product per reactant atom':(sigma_df.values*BARN) @ apriori_fluence}, index=sigma_df.index)
    # clean out reactions that can't be detected.
    tprint("Removing all reactions whose products is the same as the product (i.e. elastic scattering reactions):")
    population = population[ary([parent_product_mt.split("-")[0] != parent_product_mt.split("-")[1] for parent_product_mt in population.index])]
    # add the total counts of gamma photons detectable per primary product column
    tprint("Matching the reactions to their decay product count...")
    gamma_counts_at_measurement_per_reactant = ary([total_counts_per_primary_product[parent_product_mt.split("-")[1]] for parent_product_mt in population.index])  # use population.index (already filtered) so the array lines up with the dataframe
    # add the final counts accumulated per reactant atom column
    population['final counts accumulated per reactant atom'] = gamma_counts_at_measurement_per_reactant * population['production of primary product per reactant atom']
    # sort by activity and remove all nans
    tprint("Re-ordering the dataframe by the final counts accumulated per reactant atom and removing the entries with zero counts...")
    population.sort_values('final counts accumulated per reactant atom', inplace=True, ascending=False)
    population = population[population['final counts accumulated per reactant atom']>0.0] # keeping only those with positive counts.

    if GUESS_MATERIAL:
        from misc_library import extract_elem_from_string, pick_material, PHYSICAL_PROP_FILE, get_physical_property
        # read the physical property file to get the number densities.
        tprint(f"Reading ./{os.path.relpath(PHYSICAL_PROP_FILE)} to extract the physical parameters about various solids.")
        physical_prop = get_physical_property(PHYSICAL_PROP_FILE)

        # select the default materials and get its relevant parameters
        default_material, partial_number_density = [], []

        tprint("Selecting the default material to be used:")
        for parent_product_mt in tqdm(population.index):
            parent = parent_product_mt.split('-')[0]
            if parent[len(extract_elem_from_string(parent)):]=='0': # handle species recorded as a MIXED natural composition of isotopes, e.g. Gd0
                parent = parent[:-1]
            if ENRICH_TO_100_PERCENT: # enrichment means the element is assumed to consist solely of the specified isotope
                parent = extract_elem_from_string(parent)
            if parent not in physical_prop.columns:
                # if there isn't a parent material
                default_material.append('Missing (N/A)')
                partial_number_density.append(0.0)
                continue
            material_info = pick_material(parent, physical_prop)
            default_material.append(material_info.name+" ("+material_info['Formula']+")")
            partial_number_density.append(material_info['Number density-cm-3'] * material_info[parent]) # material_info[parent] is the fraction of the material's atoms that are this parent nuclide.

        population["default material"] = default_material
        population["partial number density (cm^-3)"] = partial_number_density
        population["gamma counts per volume of foil (cm^-3)"] = population["final counts accumulated per reactant atom"] * population["partial number density (cm^-3)"]
        population["gamma counts per unit thickness of foil (mm^-1)"] = population["gamma counts per volume of foil (cm^-3)"] * MAX_AREA * MM_CM# assuming the area = Foil Area
        tprint("Re-ordering the dataframe according to the counts per volume...")
        population.sort_values("gamma counts per volume of foil (cm^-3)", inplace=True, ascending=False) # sort again, this time according to the required volume

    tprint("Saving as 'counts.csv'...")
    try:
        population.to_csv(os.path.join(sys.argv[-1], 'counts.csv'), index_label='rname')
    except ZeroDivisionError:
        tprint("Minor issue when trying to print the values which are too small. Plese wait for a couple more minutes...")
        uncertain_columns = ["final counts accumulated per reactant atom"]
        if GUESS_MATERIAL:
            uncertain_columns+= ["gamma counts per volume of foil (cm^-3)", "gamma counts per unit thickness of foil (mm^-1)"]
        for col in uncertain_columns:
            less_than_mask = population[col]<12.5E-324 # when expressing an uncertainties.core.Variable via its __str__ method, it tries to factorize the value.
            # But if the GCD (between the nominal value and the uncertainty) rounds down to 1E-323 or smaller, that leads to a ZeroDivisionError.
            population.loc[less_than_mask, col] = uncertainties.core.Variable(0.0, 0.0)
        population.to_csv(os.path.join(sys.argv[-1], 'counts.csv'), index_label='rname')

    # save parameters at the end.
    save_parameters_as_json(sys.argv[-1], dict(
        IRRADIATION_DURATION=IRRADIATION_DURATION,
        TRANSIT_DURATION=TRANSIT_DURATION,
        MEASUREMENT_DURATION=MEASUREMENT_DURATION,
        MAX_AREA=MAX_AREA,
        ENRICH_TO_100_PERCENT=ENRICH_TO_100_PERCENT,
        )
    )
    tprint("Run complete. See results in 'counts.csv'.")