def read_section(self, mat, mf, mt): """Parse MAT/MF/MT section. """ if mf == 1: foo = mf1.read elif mf == 3: foo = mf3.read elif mf == 4: foo = mf4.read elif mf == 5: foo = mf5.read elif mf == 8: foo = mf8.read elif mf == 33 or mf == 31: foo = mf33.read elif mf == 34: foo = mf34.read elif mf == 35: foo = mf35.read else: raise SandyError( "SANDY cannot parse section MAT{}/MF{}/MT{}".format( mat, mf, mt)) if (mat, mf, mt) not in self.index: raise SandyError("section MAT{}/MF{}/MT{} is not in tape".format( mat, mf, mt)) return foo(self.loc[mat, mf, mt].TEXT)
def read_section(self, mat, mf, mt): """ Parse MAT/MF/MT section. Parameters ---------- mat : `int` MAT number mf : `int` MF number mt : `int` MT number Returns ------- `dict` content of MAT/MF/MT section structured as a dictionary """ if mf == 1: foo = mf1.read_errorr elif mf == 3: foo = mf3.read_errorr elif mf == 33 or mf == 31 or mf == 35: foo = mf33.read_errorr else: raise SandyError( "SANDY cannot parse section MAT{}/MF{}/MT{}".format( mat, mf, mt)) if (mat, mf, mt) not in self.index: raise SandyError("section MAT{}/MF{}/MT{} is not in tape".format( mat, mf, mt)) return foo(self.loc[mat, mf, mt].TEXT)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if self.empty:
        raise SandyError("tape is empty")
    self.index.names = self.labels
    self.columns = ["TEXT"]
    self.sort_index(level=self.labels, inplace=True)
    if self.index.duplicated().any():
        raise SandyError("found duplicate MAT/MF/MT")
def get_xs(self, listmat=None, listmt=None):
    """
    Extract selected cross sections (xs).
    xs are linearized on a unique grid.
    Missing points are linearly interpolated; zero is used outside the data domain.

    Conditions:
        - interpolation law must be lin-lin
        - no duplicate points on the energy grid
    """
    condition = self.index.get_level_values("MF") == 3
    tape = self[condition]
    if listmat is not None:
        conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
        condition = reduce(lambda x, y: np.logical_or(x, y), conditions)
        tape = tape[condition]
    if listmt is not None:
        conditions = [tape.index.get_level_values("MT") == x for x in listmt]
        condition = reduce(lambda x, y: np.logical_or(x, y), conditions)
        tape = tape[condition]
    ListXs = []
    for ix, text in tape.TEXT.iteritems():
        X = self.read_section(*ix)
        xs = pd.Series(X["XS"], index=X["E"], name=(X["MAT"], X["MT"])).rename_axis("E").to_frame()
        duplicates = [x for x, count in Counter(xs.index).items() if count > 1]
        if duplicates:
            raise SandyError('duplicate energy points found for MAT{}/MF{}/MT{}\n'.format(*ix) +
                             '\n'.join(map(str, duplicates)))
        if X['INT'] != [2]:
            raise SandyError('MAT{}/MF{}/MT{} interpolation scheme is not lin-lin'.format(*ix))
        ListXs.append(xs)
    if not ListXs:
        logging.warning("requested cross sections were not found")
        return pd.DataFrame()
    frame = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True, how='outer'),
                   ListXs).sort_index().interpolate(method='slinear', axis=0).fillna(0)
    return Xs(frame)
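# Illustrative usage sketch (the file name and MT numbers are placeholders):
# extract fission (MT=18) and radiative capture (MT=102) cross sections from a
# linearized, lin-lin interpolated file and get them back on one unified grid.
pendf = Endf6.from_file("u235.pendf")
xs = pendf.get_xs(listmt=[18, 102])   # columns are (MAT, MT) pairs, index is energy
print(xs.head())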
def read_section(self, mat, mf, mt): """ Parse MAT/MF/MT section """ if mf == 1: from .MF1 import read_groupr as read elif mf == 3: from .MF3 import read_groupr as read else: raise SandyError( "SANDY cannot parse section MAT{}/MF{}/MT{}".format( mat, mf, mt)) if (mat, mf, mt) not in self.index: raise SandyError("section MAT{}/MF{}/MT{} is not in tape".format( mat, mf, mt)) return read(self.loc[mat, mf, mt].TEXT)
def get_suffix(temp, meta, method=None):
    """
    Determine the ACE file suffix according to the temperature value.

    Parameters
    ----------
    temp : `float`
        processing temperature
    meta : `int`
        metastate number
    method : `str`, optional, default `None`
        use `method="aleph"` to treat metastate extensions using aleph rules

    Returns
    -------
    `str`
        suffix
    """
    dct = tmp2ext_meta if meta and method == "aleph" else tmp2ext
    if temp in dct:
        temp_in_dict = temp
    else:
        splitter = 50 if temp < 1000 else 100
        temp_in_dict = int(round(temp/splitter)*splitter)
    if temp_in_dict not in dct:
        raise SandyError("extension was not found for temperature '{}'".format(temp))
    return dct[temp_in_dict]
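# Illustrative usage sketch: temperatures not listed in the lookup dictionaries
# are rounded to the nearest 50 K below 1000 K (nearest 100 K above) before the
# lookup in `tmp2ext`/`tmp2ext_meta`. The returned values depend on the content
# of those dictionaries.
suffix = get_suffix(293.6, 0)                      # 293.6 K is rounded to 300 K for the lookup
suffix_meta = get_suffix(600, 1, method="aleph")   # metastable target, aleph rules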
def get_njoy(): """ Extract njoy executable from system environment variable `NJOY`. Returns ------- `string` njoy executable Raises ------ `SandyError` if environment variable `NJOY` is not assigned """ if "NJOY" in os.environ: exe = os.environ["NJOY"] else: raise SandyError("environment variable 'NJOY' is not assigned") return exe
def _run_njoy(text, inputs, outputs, exe=None):
    """
    Run njoy executable for given input.

    Parameters
    ----------
    text : `str`
        njoy input file passed to `Popen` as `stdin` (it must be encoded first)
    inputs : `map`
        map of {`tape` : `file`} for input files
    outputs : `map`
        map of {`tape` : `file`} for output files
    exe : `str`, optional, default is `None`
        njoy executable: if `None` (default) get it from the `NJOY` env variable
    """
    if exe is None:
        exe = get_njoy()
    logging.debug("Use NJOY executable '{}'".format(exe))
    stdout = stderr = None
    stdin = text.encode()
    with tempfile.TemporaryDirectory() as tmpdir:
        logging.debug("Create temporary directory '{}'".format(tmpdir))
        for tape, src in inputs.items():
            shutil.copy(src, os.path.join(tmpdir, tape))
        process = sp.Popen(exe, shell=True, cwd=tmpdir, stdin=sp.PIPE, stdout=stdout, stderr=stderr)
        stdoutdata, stderrdata = process.communicate(input=stdin)
        logging.debug(stdoutdata)
        logging.debug(stderrdata)
        if process.returncode != 0:
            raise SandyError("process status={}, cannot run njoy executable".format(process.returncode))
        for tape, dst in outputs.items():
            path = os.path.split(dst)[0]
            if path:
                os.makedirs(path, exist_ok=True)
            shutil.move(os.path.join(tmpdir, tape), dst)
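# Illustrative sketch (file names are placeholders): run a minimal moder deck
# built with the module's own `_moder_input` helper, which simply copies tape20
# to tape21. Inputs/outputs map the tape names used inside the temporary run
# directory to paths on the caller's filesystem.
njoy_input = _moder_input(20, 21) + "stop"
_run_njoy(njoy_input,
          inputs={"tape20": "n_9228_92-U-235.endf"},
          outputs={"tape21": "copy_of_tape.endf"})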
def sampling(iargs=None):
    """Construct multivariate normal distributions with a unit vector for mean
    and with relative covariances taken from the evaluated files.
    Perturbation factors are sampled with the same multigroup structure of the
    covariance matrix, and are applied to the pointwise data to produce the
    perturbed files.
    """
    global init, pnu, pxs, plpc, pchi, pfy, tape
    init = parse(iargs)
    ftape = read_formatted_file(init.file)
    covtape = read_formatted_file(init.cov) if init.cov else ftape
    # nsub = ftape.get_nsub()
    # INITIALIZE PERT DF
    pnu = pd.DataFrame()
    pxs = pd.DataFrame()
    plpc = pd.DataFrame()
    pchi = pd.DataFrame()
    pfy = pd.DataFrame()
    ftape, covtape, pnu, pxs, plpc, pchi, pfy = extract_samples(ftape, covtape)
    df = {}
    if pnu.empty and pxs.empty and plpc.empty and pchi.empty and pfy.empty:
        logging.warning("no covariance section was selected/found")
        return ftape, covtape, df
    # APPLY PERTURBATIONS BY MAT
    for imat, (mat, tape) in enumerate(sorted(ftape.groupby('MAT'))):
        skip_title = False if imat == 0 else True
        skip_fend = False if imat == len(ftape.mat) - 1 else True
        tape = Endf6(tape)
        kw = dict(skip_title=skip_title, skip_fend=skip_fend)
        if init.processes == 1:
            outs = {i: _sampling_mp(i, **kw) for i in range(1, init.samples+1)}
        else:
            pool = mp.Pool(processes=init.processes)
            outs = {i: pool.apply_async(_sampling_mp, (i,), kw) for i in range(1, init.samples+1)}
            outs = {i: out.get() for i, out in outs.items()}
            pool.close()
            pool.join()
        df.update({mat: outs})
    # DUMP TO FILES
    frame = pd.DataFrame(df)
    frame.index.name = "SMP"
    frame.columns.name = "MAT"
    frame = frame.stack()
    outname = init.outname if init.outname else os.path.split(init.file)[1]
    for ismp, dfsmp in frame.groupby("SMP"):
        output = os.path.join(init.outdir, '{}-{}'.format(outname, ismp))
        with open(output, 'w') as f:
            for mat, dfmat in dfsmp.groupby("MAT"):
                f.write(frame[ismp, mat])
    # PRODUCE ACE FILES
    if init.acer:
        if init.processes == 1:
            for i in range(1, init.samples+1):
                _process_into_ace(i)
        else:
            pool = mp.Pool(processes=init.processes)
            outs = {i: pool.apply_async(_process_into_ace, (i,)) for i in range(1, init.samples+1)}
            pool.close()
            pool.join()
    return ftape, covtape, df
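# Illustrative sketch: `sampling` is driven by command-line style arguments that
# are parsed by `parse`. The flag names below ("--samples", "--outdir",
# "--processes") are assumptions for illustration only; check `parse` for the
# real interface. The ENDF-6 file name is a placeholder.
ftape, covtape, df = sampling([
    "n_9228_92-U-235.endf",
    "--samples", "100",       # hypothetical flag: number of perturbed files
    "--outdir", "perturbed",  # hypothetical flag: output directory
    "--processes", "4",       # hypothetical flag: parallel workers
])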
def extract_samples(ftape, covtape):
    global init
    # EXTRACT FY PERTURBATIONS FROM COV FILE
    PertFy = pd.DataFrame()
    if 8 in covtape.mf and 454 in ftape.mt:
        fy = ftape.get_fy(listmat=init.mat, listmt=init.mt)
        if not fy.empty:
            index = fy.index.to_frame(index=False)
            dfperts = []
            for mat, dfmat in index.groupby("MAT"):
                for mt, dfmt in dfmat.groupby("MT"):
                    for e, dfe in dfmt.groupby("E"):
                        fycov = fy.get_cov(mat, mt, e)
                        pert = fycov.get_samples(init.samples, eig=0)
                        dfperts.append(pert)
            PertFy = FySamples(pd.concat(dfperts))
            if init.debug:
                PertFy.to_csv("perts_mf8.csv")
    # EXTRACT NUBAR PERTURBATIONS FROM ENDF6 FILE
    PertNubar = pd.DataFrame()
    if 31 in init.mf and 31 in ftape.mf:
        nubarcov = XsCov.from_endf6(covtape.filter_by(listmat=init.mat, listmf=[31], listmt=init.mt))
        if not nubarcov.empty:
            PertNubar = nubarcov.get_samples(init.samples, eig=init.eig)
            if init.debug:
                PertNubar.to_csv("perts_mf31.csv")
    # EXTRACT PERTURBATIONS FROM EDISTR COV FILE
    PertEdistr = pd.DataFrame()
    if 35 in init.mf and 35 in ftape.mf:
        edistrcov = ftape.get_edistr_cov()
        if not edistrcov.empty:
            PertEdistr = edistrcov.get_samples(init.samples, eig=init.eig)
            if init.debug:
                PertEdistr.to_csv("perts_mf35.csv")
    # EXTRACT PERTURBATIONS FROM LPC COV FILE
    PertLpc = pd.DataFrame()
    if 34 in init.mf and 34 in covtape.mf:
        lpccov = ftape.get_lpc_cov()
        if not lpccov.empty:
            if init.max_polynomial:
                lpccov = lpccov.filter_p(init.max_polynomial)
            PertLpc = lpccov.get_samples(init.samples, eig=init.eig)
            if init.debug:
                PertLpc.to_csv("perts_mf34.csv")
    # EXTRACT XS PERTURBATIONS FROM COV FILE
    PertXs = pd.DataFrame()
    if 33 in init.mf and 33 in covtape.mf:
        if init.errorr and len(ftape.mat) > 1:
            # Limit imposed by running ERRORR to get covariance matrices
            raise SandyError("More than one MAT number was found")
        if ftape.get_file_format() == "endf6":
            with tempfile.TemporaryDirectory() as td:
                outputs = njoy.process(init.file, broadr=False, thermr=False,
                                       unresr=False, heatr=False, gaspr=False,
                                       purr=False, errorr=init.errorr, acer=False,
                                       wdir=td, keep_pendf=True, exe=init.njoy,
                                       temperatures=[0], suffixes=[0], err=0.005)[2]
                ptape = read_formatted_file(outputs["tape30"])
                if init.debug:
                    shutil.move(outputs["tape30"], os.path.join(init.outdir, "tape30"))
                if init.errorr:
                    covtape = read_formatted_file(outputs["tape33"])  # WARNING: by doing this we delete the original covtape
                    if init.debug:
                        shutil.move(outputs["tape33"], os.path.join(init.outdir, "tape33"))
                ftape = ftape.delete_sections((None, 3, None)). \
                              add_sections(ptape.filter_by(listmf=[3])). \
                              add_sections(ptape.filter_by(listmf=[1], listmt=[451]))
        listmt = sorted(set(init.mt + [451]))  # ERRORR needs MF1/MT451 to get the energy grid
        covtape = covtape.filter_by(listmat=init.mat, listmf=[1, 33], listmt=listmt)
        covtype = covtape.get_file_format()
        xscov = XsCov.from_errorr(covtape) if covtype == "errorr" else XsCov.from_endf6(covtape)
        if not xscov.empty:
            PertXs = xscov.get_samples(init.samples, eig=init.eig, seed=init.seed33)
            if init.debug:
                PertXs.to_csv(os.path.join(init.outdir, "perts_mf33.csv"))
    return ftape, covtape, PertNubar, PertXs, PertLpc, PertEdistr, PertFy
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if len(self.mat) != 1:
        raise SandyError("only 1 MAT number is allowed in ERRORR file")
def get_nubar(self, listmat=None, listmt=None):
    """
    Extract selected nubar (fission neutron multiplicities).
    nubar are linearized on a unique grid.
    Missing points are linearly interpolated; zero is used outside the data domain.

    Conditions:
        - interpolation law must be lin-lin
        - no duplicate points on the energy grid
    """
    condition = self.index.get_level_values("MF") == 1
    tape = self[condition]
    conditions = [tape.index.get_level_values("MT") == x for x in [452, 455, 456]]
    condition = reduce(lambda x, y: np.logical_or(x, y), conditions)
    tape = tape[condition]
    if listmat is not None:
        conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
        condition = reduce(lambda x, y: np.logical_or(x, y), conditions)
        tape = tape[condition]
    if listmt is not None:
        conditions = [tape.index.get_level_values("MT") == x for x in listmt]
        condition = reduce(lambda x, y: np.logical_or(x, y), conditions)
        tape = tape[condition]
    # query = "MF==1 & (MT==452 | MT==455 | MT==456)"
    # if listmat is not None:
    #     query_mats = " | ".join(["MAT=={}".format(x) for x in listmat])
    #     query += " & ({})".format(query_mats)
    # if listmt is not None:
    #     query_mts = " | ".join(["MT=={}".format(x) for x in listmt])
    #     query += " & ({})".format(query_mts)
    # tape = self.query(query)
    ListXs = []
    for ix, text in tape.TEXT.iteritems():
        X = self.read_section(*ix)
        xs = pd.Series(X["NUBAR"], index=X["E"], name=(X["MAT"], X["MT"])).rename_axis("E").to_frame()
        duplicates = [x for x, count in Counter(xs.index).items() if count > 1]
        if duplicates:
            raise SandyError('duplicate energy points found for MAT{}/MF{}/MT{}\n'.format(*ix) +
                             '\n'.join(map(str, duplicates)))
        if X['INT'] != [2]:
            raise SandyError('MAT{}/MF{}/MT{} interpolation scheme is not lin-lin'.format(*ix))
        ListXs.append(xs)
    if not ListXs:
        logging.warning("no fission neutron multiplicity was found")
        return pd.DataFrame()
    frame = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True, how='outer'),
                   ListXs).sort_index().interpolate(method='slinear', axis=0).fillna(0)
    return Xs(frame)
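# Illustrative sketch (the file name is a placeholder): extract total and prompt
# nubar (MT=452 and MT=456) from an evaluation and inspect the resulting columns.
tape = Endf6.from_file("n_9228_92-U-235.endf")
nubar = tape.get_nubar(listmt=[452, 456])   # restrict to total and prompt nubar
print(nubar.columns.tolist())               # list of (MAT, MT) pairs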
def process(endftape, pendftape=None,
            kermas=[302, 303, 304, 318, 402, 442, 443, 444, 445, 446, 447],
            temperatures=[293.6], suffixes=None, broadr=True, thermr=True,
            unresr=False, heatr=True, gaspr=True, purr=True, errorr=False,
            acer=True, wdir="", dryrun=False, tag="", method=None, exe=None,
            keep_pendf=True, route="0", addpath=None, **kwargs):
    """
    Run sequence to process file with njoy.

    Parameters
    ----------
    endftape : `str`
        ENDF-6 format file to process
    pendftape : `str`, optional, default is `None`
        skip module reconr and use this PENDF file
    kermas : iterable of `int`, optional, default is `[302, 303, 304, 318, 402, 442, 443, 444, 445, 446, 447]`
        MT numbers for partial kermas to pass to heatr.
        .. note:: `MT=301` is the KERMA total (energy balance) and is always calculated
    temperatures : iterable of `float`, optional, default is `[293.6]`
        iterable of temperature values in K
    suffixes : iterable of `int`, optional, default is `None`
        iterable of suffix values for ACE files: if `None` is given, use internal
        routine to determine suffixes
        .. warning:: `suffixes` must match the number of entries in `temperatures`
    broadr : `bool`, optional, default is `True`
        option to run module broadr
    thermr : `bool`, optional, default is `True`
        option to run module thermr
    unresr : `bool`, optional, default is `False`
        option to run module unresr
    heatr : `bool`, optional, default is `True`
        option to run module heatr
    gaspr : `bool`, optional, default is `True`
        option to run module gaspr
    purr : `bool`, optional, default is `True`
        option to run module purr
    errorr : `bool`, optional, default is `False`
        option to run module errorr
    acer : `bool`, optional, default is `True`
        option to run module acer
    wdir : `str`, optional, default is `""`
        working directory (absolute or relative) where all output files are saved
        .. note:: `wdir` will appear as part of the `filename` in any `xsdir` file
                  if `addpath` is not set
    addpath : `str`, optional, default is `None`
        path to add in xsdir, by default use `wdir`
    dryrun : `bool`, optional, default is `False`
        option to produce the njoy input file without running njoy
    tag : `str`, optional, default is `""`
        tag to append to each output filename before the extension
        .. hint:: to process JEFF-3.3 files you could set `tag = "_j33"`
    method : `str`, optional, default is `None`
        use `method="aleph"` to apply aleph rules to the output file prefix and to
        the ACE suffix of metastable nuclides
    exe : `str`, optional, default is `None`
        njoy executable (with path)
        .. note:: if no executable is given, SANDY looks for a default executable
                  in `PATH` and in env variable `NJOY`
    keep_pendf : `bool`, optional, default is `True`
        save output PENDF file
    route : `str`, optional, default is `"0"`
        xsdir "route" parameter

    Returns
    -------
    input : `str`
        njoy input text
    inputs : `map`
        map of {`tape` : `file`} for input files
    outputs : `map`
        map of {`tape` : `file`} for output files
    """
    tape = Endf6.from_file(endftape)
    mat = tape.mat[0]
    info = tape.read_section(mat, 1, 451)
    meta = info["LISO"]
    za = int(info["ZA"])
    zam = za*10 + meta
    za_new = za + meta*100 + 300 if meta else za
    outprefix = zam if method == "aleph" else za_new
    inputs = {}
    outputs = {}
    # Only kwargs are passed to NJOY inputs, therefore add temperatures and mat
    kwargs.update({"temperatures": temperatures, "mat": mat})
    # Check input args
    if not suffixes:
        suffixes = [get_suffix(temp, meta, method) for temp in temperatures]
    if len(suffixes) != len(temperatures):
        raise SandyError("number of suffixes must match number of temperatures")
    inputs["tape20"] = endftape
    e = 21
    p = e + 1
    text = _moder_input(20, -e)
    if pendftape:
        inputs["tape99"] = pendftape
        text += _moder_input(99, -p)
    else:
        text += _reconr_input(-e, -p, **kwargs)
    if broadr:
        o = p + 1
        text += _broadr_input(-e, -p, -o, **kwargs)
        p = o
    if thermr:
        o = p + 1
        text += _thermr_input(0, -p, -o, **kwargs)
        p = o
    if unresr:
        o = p + 1
        text += _unresr_input(-e, -p, -o, **kwargs)
        p = o
    if heatr:
        for i in range(0, len(kermas), 7):
            o = p + 1
            kwargs["pks"] = kermas[i:i+7]
            text += _heatr_input(-e, -p, -o, **kwargs)
            p = o
    if gaspr:
        o = p + 1
        text += _gaspr_input(-e, -p, -o, **kwargs)
        p = o
    if purr:
        o = p + 1
        text += _purr_input(-e, -p, -o, **kwargs)
        p = o
    if keep_pendf:
        o = 30
        text += _moder_input(-p, o)
        outputs["tape{}".format(o)] = os.path.join(wdir, "{}{}.pendf".format(outprefix, tag))
    if errorr:
        for i, (temp, suff) in enumerate(zip(temperatures, suffixes)):
            o = 33 + i
            kwargs["temp"] = temp
            kwargs["suff"] = suff = ".{}".format(suff)
            text += _errorr_input(-e, -p, o, **kwargs)
            outputs["tape{}".format(o)] = os.path.join(wdir, "{}{}{}.errorr".format(outprefix, tag, suff))
    if acer:
        for i, (temp, suff) in enumerate(zip(temperatures, suffixes)):
            a = 50 + i
            x = 70 + i
            kwargs["temp"] = temp
            kwargs["suff"] = suff = ".{}".format(suff)
            text += _acer_input(-e, -p, a, x, **kwargs)
            outputs["tape{}".format(a)] = os.path.join(wdir, "{}{}{}c".format(outprefix, tag, suff))
            outputs["tape{}".format(x)] = os.path.join(wdir, "{}{}{}c.xsd".format(outprefix, tag, suff))
    text += "stop"
    if not dryrun:
        _run_njoy(text, inputs, outputs, exe=exe)
        if acer:
            # Change route and filename in xsdir file
            for i, (temp, suff) in enumerate(zip(temperatures, suffixes)):
                a = 50 + i
                x = 70 + i
                acefile = outputs["tape{}".format(a)]
                if addpath is None:
                    filename = acefile
                else:
                    filename = os.path.basename(acefile)
                    if addpath:
                        filename = os.path.join(addpath, filename)
                xsdfile = outputs["tape{}".format(x)]
                text_xsd = open(xsdfile).read(). \
                           replace("route", route). \
                           replace("filename", filename)
                text_xsd = " ".join(text_xsd.split())
                # If isotope is metastable rewrite ZA in xsdir and ace as ZA = Z*1000 + 300 + A + META*100
                if meta and method != "aleph":
                    pattern = '{:d}'.format(za) + r'\.(?P<ext>\d{2}[ct])'
                    found = re.search(pattern, text_xsd)
                    ext = found.group("ext")
                    text_xsd = text_xsd.replace("{:d}.{}".format(za, ext), "{:d}.{}".format(za_new, ext), 1)
                    text_ace = open(acefile).read()
                    text_ace = text_ace.replace("{:d}.{}".format(za, ext), "{:d}.{}".format(za_new, ext), 1)
                    with open(acefile, 'w') as f:
                        f.write(text_ace)
                with open(xsdfile, 'w') as f:
                    f.write(text_xsd)
    return text, inputs, outputs
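# Illustrative sketch (file name and temperatures are placeholders): process an
# evaluation at two temperatures, writing the PENDF and ACE outputs to
# "processed/". With `dryrun=True` only the njoy input deck is generated and
# returned; nothing is executed.
text, inputs, outputs = process("n_9228_92-U-235.endf",
                                temperatures=[293.6, 600.0],
                                wdir="processed",
                                dryrun=True)
print(text)   # inspect the generated njoy input deck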