def main():
    model_path, sens_path, software, output = parse_arguments()
    chemkin_path = os.path.join(model_path, 'chem_annotated.inp')
    spc_dict_path = os.path.join(model_path, 'species_dictionary.txt')
    spc_dict = load_spc_dict(spc_dict_path)

    # Get species info from sensitivity results
    sensitivities = find_sensitivity_results(sens_path)
    print('Found sensitivity results:\n' + '\n'.join(sensitivities))
    spc_info = get_spc_info_from_sensitivities(sensitivities)

    # The results usually contain a lot of S(XX) labels; convert them using aliases
    spc_aliases = get_species_aliases(chemkin_path, key='chemkin')

    # Get SMILES, adjacency list, and other information from the species dictionary
    spc_info = expand_spc_info_by_spc_dict(spc_info, spc_dict)

    # Generate ARC input
    arc_input = {'species': []}
    for label, spc in spc_info.items():
        spc.update({'label': spc_aliases[label]})
        arc_input['species'].append(spc)

    output = output or os.curdir
    output = os.path.join(output, 'input_sens.yml')
    actual_output_path = save_yaml_file(output, arc_input, overwrite=False)
    print(f'Saved to {actual_output_path}.')
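# `save_yaml_file` is called by all of the scripts here but is defined elsewhere. The
# sketch below is a minimal, hypothetical stand-in, assuming that with overwrite=False
# it picks a non-clashing filename and returns the path it actually wrote to (which is
# why the callers capture `actual_output_path`). The real helper may behave differently.
import os
import yaml


def save_yaml_file_sketch(path, content, overwrite=False):
    """Write `content` to `path` as YAML without clobbering existing files."""
    base, ext = os.path.splitext(path)
    actual_path, counter = path, 1
    while not overwrite and os.path.exists(actual_path):
        # e.g. input_sens.yml -> input_sens_1.yml, input_sens_2.yml, ...
        actual_path = f'{base}_{counter}{ext}'
        counter += 1
    with open(actual_path, 'w') as f:
        yaml.dump(content, f, default_flow_style=False)
    return actual_path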
def main():
    model_path, flux_path, software, output = parse_arguments()
    chemkin_path = os.path.join(model_path, 'chem_annotated.inp')
    spc_dict_path = os.path.join(model_path, 'species_dictionary.txt')
    spc_dict = load_spc_dict(spc_dict_path)

    # Get species info from flux diagrams
    flux_diagrams = find_flux_diagrams(flux_path)
    print('Found flux diagrams:\n' + '\n'.join(flux_diagrams))
    spc_info = get_spc_info_from_flux_diagrams(flux_diagrams)

    # These packages use the RMG label instead of the Chemkin label
    if software in ['rmg']:
        spc_aliases = get_species_aliases(chemkin_path, key='rmg')
    else:
        spc_aliases = {label: label for label in spc_info.keys()}

    # Get SMILES, adjacency list, and other information from the species dictionary
    spc_info = expand_spc_info_by_spc_dict(spc_info, spc_dict, spc_aliases)

    # Generate ARC input
    arc_input = {'species': []}
    for label, spc in spc_info.items():
        spc.update({'label': spc_aliases[label]})
        arc_input['species'].append(spc)

    output = output or os.curdir
    output = os.path.join(output, 'input_flux.yml')
    actual_output_path = save_yaml_file(output, arc_input, overwrite=False)
    print(f'Saved to {actual_output_path}.')
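# `find_flux_diagrams` is used above but defined elsewhere. A minimal sketch, assuming
# it simply walks `flux_path` and collects the graphviz .dot files that RMG writes for
# its flux diagrams; the real helper may filter differently (e.g., by species or time).
import os


def find_flux_diagrams_sketch(flux_path):
    """Return paths to all .dot flux-diagram files found under `flux_path`."""
    diagrams = []
    for root, _, files in os.walk(flux_path):
        diagrams.extend(os.path.join(root, f) for f in files if f.endswith('.dot'))
    return sorted(diagrams)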
def main():
    inputs, output, resonance = parse_arguments()

    arc_input = combine_arc_species_inputs(*inputs, resonance=resonance)

    output = output or os.curdir
    output = os.path.join(output, 'input_merged.yml')
    actual_output_path = save_yaml_file(output, arc_input, overwrite=False)
    print(f'Saved to {actual_output_path}.')
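# `combine_arc_species_inputs` is imported from elsewhere. The sketch below shows the
# general idea under a simplifying assumption: the 'species' lists of the input files
# are merged and duplicates are dropped by label only. The real helper presumably also
# compares structures (optionally generating resonance structures, hence `resonance`).
import yaml


def combine_arc_species_inputs_sketch(*input_files, resonance=True):
    """Merge the 'species' lists of several ARC input files, skipping duplicate labels."""
    # `resonance` is accepted for signature compatibility but unused in this sketch.
    combined, seen_labels = {'species': []}, set()
    for input_file in input_files:
        with open(input_file) as f:
            content = yaml.safe_load(f)
        for spc in content.get('species', []):
            if spc['label'] in seen_labels:
                continue  # label-based dedup only; structure comparison omitted here
            seen_labels.add(spc['label'])
            combined['species'].append(spc)
    return combined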
def main():
    model_path, output = parse_arguments()
    chemkin_path = os.path.join(model_path, 'chem_annotated.inp')
    spc_dict_path = os.path.join(model_path, 'species_dictionary.txt')
    spc_dict = load_spc_dict(spc_dict_path)

    spc_aliases = get_species_aliases(chemkin_path, key='rmg')
    spc_info = {label: {'label': label} for label in spc_dict.keys()}

    # Get SMILES, adjacency list, and other information from the species dictionary
    spc_info = expand_spc_info_by_spc_dict(spc_info, spc_dict)

    # Generate ARC input
    arc_input = {'species': []}
    for label, spc in spc_info.items():
        spc.update({'label': spc_aliases[label]})
        arc_input['species'].append(spc)

    if not output:
        output = os.path.join(model_path, 'input_spc_dict.yml')
    actual_output_path = save_yaml_file(output, arc_input, overwrite=False)
    print(f'Saved to {actual_output_path}.')
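# `load_spc_dict` is defined elsewhere and likely builds RMG Species objects. The sketch
# below is a simplified, dependency-free stand-in that only maps each label in an RMG
# species_dictionary.txt to its raw adjacency-list text; blocks in that file are
# separated by blank lines, with the label on the first line of each block.
def load_spc_dict_sketch(spc_dict_path):
    """Return {label: adjacency_list_string} parsed from an RMG species dictionary."""
    spc_dict = {}
    with open(spc_dict_path) as f:
        blocks = f.read().strip().split('\n\n')
    for block in blocks:
        lines = block.strip().splitlines()
        if not lines:
            continue
        label, adjlist = lines[0].strip(), '\n'.join(lines[1:])
        spc_dict[label] = adjlist
    return spc_dict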
def main():
    input_file, libraries_path, filter_spc_dict, output = parse_arguments()

    # Get species info from the input file
    arc_input_species = read_yaml_file(input_file)['species']
    spc_info = {spc['label']: spc for spc in arc_input_species}
    print(f'Starting with {len(spc_info)} species...')

    if filter_spc_dict:
        # Load the filter species dictionary
        filter_spc_dict = load_spc_dict(filter_spc_dict)
        # Keep only species that are not in the filter species dictionary
        clean = []
        for label, spc in spc_info.items():
            dict_label, _ = find_species_from_spc_dict(spc, filter_spc_dict)
            if not dict_label:  # cannot find the species, so keep it
                clean.append(label)
            else:
                print(f'Warning: species {label} is cleaned out because it belongs '
                      f'to the filter species dictionary')
        spc_info = {label: spc for label, spc in spc_info.items() if label in clean}

    if libraries_path:
        # Load thermo libraries
        libraries = read_yaml_file(libraries_path)
        thermo_db = load_thermo_database(libraries=libraries['built-in_libs'])
        for t_lib in libraries['external_libs']:
            load_thermo_lib_by_path(t_lib, thermo_db)
        # Keep only species whose thermo is not already available in the libraries
        clean = []
        for label, spc in spc_info.items():
            try:
                thermo_data = thermo_db.get_all_thermo_data(species_from_spc_info(spc))
            except Exception:
                print(f'Warning: Cannot generate thermo for {label}.')
                continue
            if len(thermo_data) <= 1:  # only GAV (group additivity) is available
                clean.append(label)
            else:
                print(f'Warning: species {label} is cleaned out because it already '
                      f'exists in the thermo libraries')
        spc_info = {label: spc for label, spc in spc_info.items() if label in clean}

    # Make sure there are no duplicates in spc_info.
    # Iteratively applying the combine function helps filter out duplicates.
    cleaned_info = {}
    cleaned_spc_dict = {}
    for label, spc in spc_info.items():
        cleaned_info = combine_spc_info(spc_info1=cleaned_info,
                                        spc_info2={label: spc},
                                        spc_dict=cleaned_spc_dict)

    # Convert species info to ARC input
    print(f'Eventually, {len(cleaned_info)} species are left...')
    arc_input1, arc_input2 = {'species': []}, {'species': []}
    for spc in cleaned_info.values():
        if ('multiplicity' in spc) and (spc['multiplicity'] < 3):
            # species with multiplicity 1 (closed shell) or 2
            arc_input1['species'].append(spc)
        else:
            # other species
            arc_input2['species'].append(spc)

    output = output or os.curdir
    output1 = os.path.join(output, 'input_cleaned_mul12.yml')
    output2 = os.path.join(output, 'input_cleaned_mul3p.yml')
    actual_output_path1 = save_yaml_file(output1, arc_input1, overwrite=False)
    actual_output_path2 = save_yaml_file(output2, arc_input2, overwrite=False)
    print(f'Species with multiplicity < 3 are saved to {actual_output_path1}.\n'
          f'Species with multiplicity >= 3 are saved to {actual_output_path2}.')
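# `species_from_spc_info` is not defined in this file; `get_all_thermo_data` expects an
# RMG Species object, so the helper presumably builds one from the stored identifiers.
# A minimal sketch, assuming RMG-Py's Species API and that each spc entry carries an
# 'adjlist' and/or 'smiles' key (both assumptions, not guaranteed by this script):
from rmgpy.species import Species


def species_from_spc_info_sketch(spc):
    """Build an RMG Species from a species-info dict using its adjacency list or SMILES."""
    species = Species(label=spc['label'])
    if spc.get('adjlist'):
        species.from_adjacency_list(spc['adjlist'])
    elif spc.get('smiles'):
        species.from_smiles(spc['smiles'])
    else:
        raise ValueError(f"No structure information for species {spc['label']}")
    return species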