import fnmatch
import os

import numpy as np
import scipy.stats

import results


def get_reaction_rate(directory, cell_list, nuc_list, reaction):
    """ Gets the reaction rate.

    The reaction rate is specifically result.rate_bar * result.concentration,
    as reaction rates are divided by atom density prior to utilization.

    Parameters
    ----------
    directory : str
        Directory to read results from.
    cell_list : List[int]
        List of cell IDs to extract data from.
    nuc_list : List[str]
        List of nuclides to extract data from.
    reaction : str
        Reaction to extract data for.

    Returns
    -------
    time : np.array
        Time for each step.
    val : Dict[Dict[np.array]]
        Reaction rate, indexed [cell id : int][nuclide : str].
    """

    # First, calculate how many step files are in the folder
    count = 0
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            count += 1

    # Allocate result
    val = {}
    time = np.zeros(count)

    for cell in cell_list:
        val[cell] = {}
        for nuc in nuc_list:
            val[cell][nuc] = np.zeros(count)

    # Read in each file, extract the reaction rate, close file
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            # Get ind (files will be found out of order)
            name = file.split(".")
            ind = int(name[0][4::])

            # Read file
            result = results.read_results(directory + '/' + file)

            for cell in cell_list:
                if str(cell) in result.num[0].cell_to_ind:
                    for nuc in nuc_list:
                        if nuc in result.num[0].nuc_to_ind:
                            val[cell][nuc][ind] = \
                                result.num[0][str(cell), nuc] * \
                                result.rate_bar[str(cell), nuc, reaction]
            time[ind] = result.time

    return time, val
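# Usage sketch (not part of the original module): extract the Xe-135
# (n,gamma) rate in a single cell and print its final value.  The directory
# name, cell ID, nuclide, and reaction labels are assumed for illustration.
def print_final_xe135_capture(directory="test"):
    time, val = get_reaction_rate(directory, [10000], ["Xe-135"], "(n,gamma)")
    # val is indexed [cell id][nuclide]; take the last step.
    print("t = {}: Xe-135 (n,gamma) rate = {}".format(
        time[-1], val[10000]["Xe-135"][-1]))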
def get_atoms(directory, cell_list, nuc_list):
    """ Get total atom count as a function of time.

    Parameters
    ----------
    directory : str
        Directory to read results from.
    cell_list : List[int]
        List of cell IDs to extract data from.
    nuc_list : List[str]
        List of nuclides to extract data from.

    Returns
    -------
    time : np.array
        Time for each step.
    val : Dict[Dict[np.array]]
        Total number of atoms, indexed [cell id : int][nuclide : str].
    """

    # First, calculate how many step files are in the folder
    count = 0
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            count += 1

    # Allocate result
    val = {}
    time = np.zeros(count)

    for cell in cell_list:
        val[cell] = {}
        for nuc in nuc_list:
            val[cell][nuc] = np.zeros(count)

    # Read in each file, extract the atom counts, close file
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            # Get ind (files will be found out of order)
            name = file.split(".")
            ind = int(name[0][4::])

            # Read file
            result = results.read_results(directory + '/' + file)

            for cell in cell_list:
                if str(cell) in result.num[0].cell_to_ind:
                    for nuc in nuc_list:
                        if nuc in result.num[0].nuc_to_ind:
                            val[cell][nuc][ind] = result.num[0][str(cell), nuc]
            time[ind] = result.time

    return time, val
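# Usage sketch (not part of the original module): sum the final atom count of
# one nuclide over a few cells.  All names below are assumed for illustration.
def print_final_atoms(directory="test", cells=(10000,), nuc="Xe-135"):
    time, val = get_atoms(directory, list(cells), [nuc])
    total = sum(val[cell][nuc][-1] for cell in cells)
    print("t = {}: {} atoms of {}".format(time[-1], total, nuc))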
def get_eigval(directory):
    """ Get eigenvalues as a function of time.

    Parameters
    ----------
    directory : str
        Directory to read results from.

    Returns
    -------
    time : np.array
        Time for each step.
    val : np.array
        Eigenvalue for each step.
    """

    # First, calculate how many step files are in the folder
    count = 0
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            count += 1

    # Allocate result
    val = np.zeros(count)
    time = np.zeros(count)

    # Read in file, get eigenvalue, close file
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            # Get ind (files will be found out of order)
            name = file.split(".")
            ind = int(name[0][4::])

            # Read file
            result = results.read_results(directory + '/' + file)

            # Extract results
            val[ind] = result.k
            time[ind] = result.time

    return time, val
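# Usage sketch (not part of the original module): plot the eigenvalue history
# of a single run.  The directory and file names are assumed for illustration,
# and matplotlib is assumed to be available.
def plot_eigval(directory="test", fname="eigval.pdf"):
    import matplotlib.pyplot as plt

    time, keff = get_eigval(directory)
    plt.plot(time, keff, "-o")
    plt.xlabel("Time")
    plt.ylabel("Eigenvalue")
    plt.savefig(fname)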
def get_eigval_average(dir_list):
    """ Get eigenvalues as a function of time for a set of simulations.

    This function extracts the eigenvalue from several different simulation
    directories and merges them together.  It is assumed that each directory
    was run precisely identically.

    Parameters
    ----------
    dir_list : List[str]
        List of directories to read from.

    Returns
    -------
    time : np.array
        Time for each step.
    mu : np.array
        Eigenvalue average for each step.
    std_val : np.array
        Standard error of the mean eigenvalue for each step.
    p_val : List[float]
        Shapiro-Wilk p-value for each step.
    """

    # First, calculate how many step files are in each folder
    count_list = [0 for directory in dir_list]

    for i in range(len(dir_list)):
        directory = dir_list[i]
        for file in os.listdir(directory):
            if fnmatch.fnmatch(file, 'step*'):
                count_list[i] += 1

    # Allocate result
    count = min(count_list)
    val = np.zeros((count, len(dir_list)))
    time = np.zeros(count)

    # Read in file, get eigenvalue, close file
    for i in range(len(dir_list)):
        directory = dir_list[i]
        for file in os.listdir(directory):
            if fnmatch.fnmatch(file, 'step*'):
                # Get ind (files will be found out of order)
                name = file.split(".")
                ind = int(name[0][4::])

                # Do not extract data past the end of the minimum number of
                # run steps.
                if ind >= count:
                    continue

                # Read file
                result = results.read_results(directory + '/' + file)

                # Extract results
                val[ind, i] = result.k
                time[ind] = result.time

    # Perform statistics on result
    r_stats = scipy.stats.describe(val, axis=1)
    mu = r_stats.mean
    std_val = np.sqrt(r_stats.variance) / np.sqrt(len(dir_list))
    p_val = [scipy.stats.shapiro(b)[1] for b in val]

    return time, mu, std_val, p_val
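# Usage sketch (not part of the original module): plot the mean eigenvalue and
# its standard error across several identically-configured runs.  The
# directory and file names are assumed for illustration.
def plot_eigval_average(dir_list=("run0", "run1", "run2"), fname="eigval_avg.pdf"):
    import matplotlib.pyplot as plt

    time, mu, std_val, p_val = get_eigval_average(list(dir_list))
    plt.errorbar(time, mu, yerr=std_val)
    plt.xlabel("Time")
    plt.ylabel("Eigenvalue")
    plt.savefig(fname)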
def get_atoms_volaveraged(directory, cell_list, nuc_list):
    """ Get volume averaged atom count as a function of time.

    This function sums the atom concentration from each cell and then divides
    by the volume sum.

    Parameters
    ----------
    directory : str
        Directory to read results from.
    cell_list : List[int]
        List of cell IDs to average.
    nuc_list : List[str]
        List of nuclides to extract data from.

    Returns
    -------
    time : np.array
        Time for each step.
    val : Dict[np.array]
        Volume averaged atoms, indexed [nuclide : str].
    """

    # First, calculate how many step files are in the folder
    count = 0
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            count += 1

    # Allocate result
    val = {}
    time = np.zeros(count)

    for nuc in nuc_list:
        val[nuc] = np.zeros(count)

    # Calculate volume of cell_list from the first result
    result_0 = results.read_results(directory + '/step0.pklz')

    vol = 0.0
    for cell in cell_list:
        if cell in result_0.volume:
            vol += result_0.volume[cell]

    # Read in each file, extract the volume-averaged atoms, close file
    for file in os.listdir(directory):
        if fnmatch.fnmatch(file, 'step*'):
            # Get ind (files will be found out of order)
            name = file.split(".")
            ind = int(name[0][4::])

            # Read file
            result = results.read_results(directory + '/' + file)

            for cell in cell_list:
                if str(cell) in result.num[0].cell_to_ind:
                    for nuc in nuc_list:
                        if nuc in result.num[0].nuc_to_ind:
                            val[nuc][ind] += result.num[0][str(cell), nuc] / vol
            time[ind] = result.time

    return time, val
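# Usage sketch (not part of the original module): plot the volume-averaged
# Xe-135 concentration in one cell on a log scale.  The directory, cell, and
# file names are assumed for illustration.
def plot_volaveraged_xe135(directory="test", fname="xe135.pdf"):
    import matplotlib.pyplot as plt

    time, val = get_atoms_volaveraged(directory, [10000], ["Xe-135"])
    plt.semilogy(time, val["Xe-135"])
    plt.xlabel("Time")
    plt.ylabel("Volume-averaged Xe-135")
    plt.savefig(fname)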
def parse(self, args):
    parser = argparse.ArgumentParser(description='parse files')
    # parser.add_argument('tweetfile', help="prefix file to use")
    # parser.add_argument('--refresh-dic', action='store_true')
    args = parser.parse_args(args)

    with open("qrels.txt") as f:
        relevant = read_relevant(f)

    columns = [
        "", "P@10", "R@50", "r-Precision", "AP", "nDCG@10", "nDCG@20"
    ]

    orig_stdout = sys.stdout
    max_s = 6
    averages = defaultdict(list)

    for num in range(1, max_s + 1):
        fname = "S" + str(num)
        with open(fname + ".results", "r") as f:
            retrieved = read_results(f)
        assert len(retrieved) == len(relevant)

        outfile = open(fname + ".eval", "w")
        sys.stdout = outfile

        print("\t".join(columns))
        total = defaultdict(float)
        for q in retrieved.keys():
            scores = get_scores(retrieved[q], relevant[q])
            # eprint("scores for", q, "is", scores)
            for col in columns:
                if col == "":
                    print(q, end="")
                else:
                    score = scores[col]
                    total[col] += score
                    print("\t{0:.3f}".format(score), end="")
            print()

        for col in columns:
            if col == "":
                print("mean", end="")
                averages[fname].append(fname)
            else:
                score = total[col] / len(retrieved)
                score_str = "{0:.3f}".format(score)
                averages[fname].append(score_str)
                print("\t" + score_str, end="")
        print()

        outfile.close()

    with open("All.eval", "w") as f:
        sys.stdout = f
        print("\t".join(columns))
        for num in range(1, max_s + 1):
            key = "S" + str(num)
            print("\t".join(averages[key]))

    sys.stdout = orig_stdout
from results import read_results

for task in ["adv", "namepp", "noun_conj", "qnty_namepp", "qnty_nounpp",
             "nounpp", "qnty_simple", "rel_def_obj", "rel_def", "rel_nondef",
             "s_conj", "simple", "that_adv", "that_compl", "that_nounpp",
             "that"]:
    print(f"Reading results of {task}")
    res = read_results(f"output_ablation/{task}.info")
    if task in ["simple", "adv", "namepp", "qnty_simple", "qnty_namepp",
                "rel_def", "rel_nondef"]:
        print(f"S {res['873']['accuracy_plural']}")
        print(f"P {res['873']['accuracy_singular']}")
    elif task in ["nounpp", "noun_conj", "qnty_nounpp", "that", "that_adv",
                  "that_compl", "rel_def_obj", "s_conj"]:
        print(f"SS {res['873']['accuracy_singular_singular']}")
        print(f"SP {res['873']['accuracy_singular_plural']}")
        print(f"PS {res['873']['accuracy_plural_singular']}")
        print(f"PP {res['873']['accuracy_plural_plural']}")
    elif task == "that_nounpp":
        print(f"SSS {res['873']['accuracy_singular_singular_singular']}")
        print(f"SSP {res['873']['accuracy_singular_singular_plural']}")
        print(f"SPS {res['873']['accuracy_singular_plural_singular']}")
        print(f"SPP {res['873']['accuracy_singular_plural_plural']}")
        print(f"PSS {res['873']['accuracy_plural_singular_singular']}")
        print(f"PSP {res['873']['accuracy_plural_singular_plural']}")
        print(f"PPS {res['873']['accuracy_plural_plural_singular']}")
        print(f"PPP {res['873']['accuracy_plural_plural_plural']}")
import numpy as np

import results
import zernike

order = 10
rings = 20
wedges = 32

for i in range(31):
    r = results.read_results("./vera_1i_fet/step" + str(i) + ".pklz")

    con = r.num[0]
    # print(r.k)

    zer = zernike.ZernikePolynomial(
        order,
        con["10000", "Xe-135"] * rings * wedges / (np.pi * 0.4096**2) / np.pi)

    print(zer.coeffs[0] / (rings * wedges) * np.pi)
    # zer.force_positive()
    # zer.plot_disk(rings, wedges, "testg" + str(i+1) + ".pdf")

    rea = r.rates[0]

    zer = rea.get_fet(["10000", "Xe-135", "(n,gamma)"]) * rings * wedges / (
        np.pi * 0.4096**2) / np.pi * 1.0e24