def get_next_ids(test=False):
    """Read the next available ids from the ``next_id.tsv`` file.

    Parameters
    ----------
    test : bool, optional
        If true, the command is run for the ``nepc_test`` database.
        Otherwise, it is run for the ``nepc`` database.
        (Default is the ``nepc`` database.)

    Returns
    -------
    : int
        Next cs_id to use
    : int
        Next csdata_id to use
    """
    if test:
        data_dir = nepc_config.nepc_home() + '/tests/data/'
    else:
        data_dir = nepc_config.nepc_cs_home() + '/data/'
    # Line 0 is the header; line 1 holds the tab-separated id pair.
    with open(data_dir + "/next_id.tsv") as id_file:
        id_lines = id_file.readlines()
    cs_id_text, csdata_id_text = id_lines[1].split('\t')
    return int(cs_id_text), int(csdata_id_text)
def format_model(model, type='lxcat', filename='lxcat.txt'):
    """Write *model* to *filename* in the given text format.

    Parameters
    ----------
    model : nepc.nepc.Model
        The model whose cross sections are written out.
    type : str, optional
        Output format; only ``'lxcat'`` is supported.
    filename : str, optional
        Path of the file to write (default ``'lxcat.txt'``).

    Raises
    ------
    Exception
        If *type* is unsupported or *model* is not a ``nepc.nepc.Model``.
    """
    valid_types = ['lxcat']
    if type not in valid_types:
        raise Exception(f'type {type} is not supported')
    if not isinstance(model, nepc.nepc.Model):
        raise Exception(f'model {model} is not supported')
    # Map nepc process names -> LXCat process names from the project TSV.
    file_process_lxcat = nepc_config.nepc_home(
    ) + '/tests/data/processes_lxcat.tsv'
    with open(file_process_lxcat) as f:
        processes_lxcat = pd.read_csv(
            f, sep='\t', header=0,
            names=['name', 'lxcat']).set_index('name').T.to_dict('records')[0]

    def threshold(cs):
        # For elastic-type processes LXCat expects m/M instead of a
        # threshold energy; otherwise use the cross section's threshold.
        if cs.metadata['process'] in ['total', 'elastic', 'elastic_total']:
            #TODO: implement m/M fully
            return 1.95e-5
        else:
            return cs.metadata['threshold']

    with open(filename, 'w') as f:
        for cs in model.cs:
            # Emit the LXCat entry header fields in their required order,
            # then the energy/sigma data table.
            for metadata in [
                    'process', 'reaction_abbrev', 'threshold',
                    'reaction_full', 'param', 'header'
            ]:
                if metadata == 'process':
                    line = f"{str(processes_lxcat[cs.metadata[metadata]]).upper()}\n"
                elif metadata == 'reaction_abbrev':
                    line = f'{cs.reaction_text[0]}\n'
                elif metadata == 'threshold':
                    line = f" {threshold(cs):.6e}\n"
                elif metadata == 'reaction_full':
                    line = (
                        f'PROCESS: {str(cs.reaction_text[1])}, '
                        f'{str(processes_lxcat[cs.metadata["process"]]).capitalize()}\n'
                    )
                elif metadata == 'param':
                    if cs.metadata['process'] in [
                            'total', 'elastic', 'elastic_total'
                    ]:
                        line = f'PARAM.: m/M = {threshold(cs)}\n'
                        # TODO: make sure units_e is not needed here
                    else:
                        line = f'PARAM.: E = {threshold(cs)} eV\n'
                elif metadata == 'header':
                    line = (f'COLUMNS: Energy (eV) | Cross section (m2)\n'
                            f'-----------------------------\n')
                else:
                    line = f"{cs.metadata[metadata]}\n"
                f.write(line)
            # Data table: one "energy<TAB>sigma" row per point.
            for e, sigma in zip(cs.data['e'], cs.data['sigma']):
                f.write(f'{e:.6e}\t{sigma:.6e}\n')
            # Entry terminator expected by LXCat readers.
            f.write(f'-----------------------------\n\n')
def data_config(github):
    """Yield ``[data_dir, dir_names]`` for the fictitious test data.

    Parameters
    ----------
    github : bool
        If true, resolve paths from the current working directory
        (GitHub runner); otherwise use ``config.nepc_home()``.
    """
    home = os.getcwd() if github else config.nepc_home()
    data_dir = home + "/tests/data/"
    dir_names = [
        home + "/tests/data/cs/lxcat/n2/fict/",
        home + "/tests/data/cs/lumped/n2/fict_total/",
    ]
    yield [data_dir, dir_names]
def process_attr(process: str, attr_list: List[str], test=False):
    """Look up attributes of *process* in the ``processes.tsv`` file.

    Parameters
    ----------
    process : str
        Name of the process (matched against the ``name`` column).
    attr_list : List[str]
        Column names to fetch for that process.
    test : bool, optional
        If true, read from the test data directory.

    Returns
    -------
    dict
        Mapping of each requested attribute to its value.
    """
    if test:
        data_dir = config.nepc_home() + "/tests/data"
    else:
        data_dir = config.nepc_cs_home() + "/data"
    table = pd.read_csv(data_dir + '/processes.tsv', sep='\t', header=0)
    matching = table.loc[table.name == process]
    return {attr: matching[attr].values[0] for attr in attr_list}
def get_states(test=False):
    """Get lists of name's and long_name's from states.tsv file."""
    if test:
        data_dir = nepc_config.nepc_home() + '/tests/data/'
    else:
        data_dir = nepc_config.nepc_cs_home() + '/data/'
    # Skip the header row, then split each remaining row on tabs.
    with open(data_dir + 'states.tsv') as states_f:
        rows = [line.split('\t') for line in states_f.readlines()[1:]]
    names = [row[1] for row in rows]
    long_names = [row[2] for row in rows]
    return (names, long_names)
def write_next_id_to_file(next_cs_id, next_csdata_id, test=False):
    """Write out the next id's for the database to a file.

    Parameters
    ----------
    next_cs_id : int
        The next cs_id to use
    next_csdata_id: int
        The next csdata_id to use
    test: bool, optional
        If true, the command is run for the ``nepc_test`` database.
        Otherwise, it is run for the ``nepc`` database.
        (Default is the ``nepc`` database.)
    """
    if test:
        nepc_data_home = nepc_config.nepc_home() + '/tests/data/'
    else:
        nepc_data_home = nepc_config.nepc_cs_home() + '/data/'
    filename = nepc_data_home + "/next_id.tsv"
    # Use a context manager so the file handle is closed even if a
    # write fails (the original open()/close() pair leaked on error).
    with open(filename, "w+") as id_file:
        id_file.write("\t".join(('next_cs_id', 'next_csdata_id')) + "\n")
        id_file.write("\t".join((str(next_cs_id), str(next_csdata_id))))
# NOTE(review): this chunk continues a script whose PARSER is defined
# above the visible region; the trailing `def np_str` body also lies
# beyond view.
PARSER.add_argument('--debug', action='store_true',
                    help='print additional debug info')
PARSER.add_argument('--test', action='store_true',
                    help='build test database')
ARGS = PARSER.parse_args()
# Debug mode caps the number of cross sections / rates processed so a
# build finishes quickly.
if ARGS.debug:
    MAX_CS = 50
    MAX_RATE = 50
else:
    MAX_CS = 2000000
    MAX_RATE = 2000000
# --test targets the small fictitious dataset under tests/data; the
# default build uses the full curated data directories.
if ARGS.test:
    database = 'nepc_test'
    NEPC_DATA = config.nepc_home() + "/tests/data/"
    DIR_NAMES = ["/cs/n2/fict/", "/cs/n2/fict_total/"]
else:
    database = 'nepc'
    NEPC_DATA = config.nepc_cs_home() + "/data/"
    DIR_NAMES = [
        "/cs/n2/itikawa/", "/cs/n2/zipf/", "/cs/n/zatsarinny/",
        "/cs/n2/phelps/", "/cs/n2/phelps_total/", "/cs/n2/little/"
    ]
# Wall-clock start time for reporting total build duration.
T0 = time.time()
HOME = config.user_home()


def np_str(df, row, name):
# NOTE(review): fragment — the opening of this add_argument call is
# above the visible region, and the final DIR_NAMES list is truncated
# below it.
                    help='build test database on GitHub runner')
ARGS = PARSER.parse_args()
# Debug mode caps the number of cross sections / rates processed.
if ARGS.debug:
    MAX_CS = 50
    MAX_RATE = 50
else:
    MAX_CS = 2000000
    MAX_RATE = 2000000
# --test and --github both build nepc_test but resolve paths
# differently, so they are mutually exclusive.
if ARGS.test and ARGS.github:
    raise Exception('can pass only --test or --github, not both')
if ARGS.test:
    database = 'nepc_test'
    NEPC_HOME = config.nepc_home()
    NEPC_DATA = NEPC_HOME + "/tests/data/"
    DIR_NAMES = ["/cs/lxcat/n2/fict/", "/cs/lumped/n2/fict_total/"]
    HOME = config.user_home()
    option_files = HOME + '/.mysql/defaults'
elif ARGS.github:
    # On a GitHub runner the repository checkout is the working
    # directory and MySQL defaults ship with the repo.
    database = 'nepc_test'
    NEPC_HOME = os.getcwd()
    NEPC_DATA = NEPC_HOME + "/tests/data/"
    DIR_NAMES = ["/cs/lxcat/n2/fict/", "/cs/lumped/n2/fict_total/"]
    option_files = NEPC_HOME + '/nepc/mysql/defaults'
else:
    database = 'nepc'
    NEPC_DATA = config.nepc_cs_home() + "/data/"
    DIR_NAMES = [
        "/cs/n2/itikawa/", "/cs/n/zatsarinny/", "/cs/lxcat/n2/phelps/",
def test_nepc_home(data_config, travis):
    """Check that ``config.nepc_home()`` is a string equal to the
    ``NEPC_HOME`` environment variable (skipped on Travis)."""
    if travis:
        return
    assert isinstance(config.nepc_home(), str)
    assert config.nepc_home() == os.environ.get('NEPC_HOME')