def _main(args):
    """Main entry point.

    Runs every registered writer over every selected institute.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        for writer_cls in _WRITERS:
            writer_cls(institute).execute()
def _main(args):
    """Main entry point.

    Maps the superset to a subset for every (institute, source,
    experiment) combination.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        for source in vocabs.get_institute_sources(institute):
            for experiment in vocabs.get_experiments():
                _map_superset_to_subset(
                    args.archive_dir, institute, source, experiment)
def _main(args):
    """Main entry point.

    Writes a processing context per (institute, source) combination.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        for source in vocabs.get_institute_sources(institute):
            ProcessingContext(institute, source).write()
def _main(args):
    """Main entry point.

    Resets the 'cim' model folder for every (institution, source)
    combination: removes any existing folder then recreates it empty.

    :param args: Parsed command line arguments (uses ``institution_id``).

    """
    for institution in vocabs.get_institutes(args.institution_id):
        for source_id in vocabs.get_institute_sources(institution):
            folder = io_mgr.get_model_folder(institution, source_id, 'cim')
            # ignore_errors tolerates a folder that does not yet exist -
            # the unguarded call raised FileNotFoundError in that case.
            shutil.rmtree(folder, ignore_errors=True)
            os.makedirs(folder)
def _main(args):
    """Main entry point.

    Best-effort write per institute: a failure is reported and the
    remaining institutes are still processed.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        try:
            _write(institute)
        except Exception as err:
            # Report the failing institute and carry on.
            print(institute)
            print(err)
def _main(args):
    """Main entry point.

    Writes a spreadsheet per (institute, source, topic) combination.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        for source in vocabs.get_institute_sources(institute):
            for topic in pyessv.ESDOC.cmip6.get_model_topics(source):
                Spreadsheet(institute, source, topic).write()
def _main(args):
    """Main entry point.

    Runs every verifier over every selected institute, then logs the
    accumulated errors.

    """
    errors = []
    for institute in vocabs.get_institutes(args.institution_id):
        for verify in _VERIFIERS:
            verify(institute, errors)
    for error in errors:
        logger.log(error)
def _main(args):
    """Main entry point.

    Writes a spreadsheet per (institute, model, topic, domain)
    combination.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        for model in vocabs.get_models_by_institution(institute):
            for topic in vocabs.get_topics():
                for domain in vocabs.get_domains():
                    spreadsheet = Spreadsheet(institute, model, topic, domain)
                    logger.log(
                        "generating --> {}".format(spreadsheet.fpath),
                        app="SH")
                    spreadsheet.write()
def _main(args):
    """Main entry point.

    Verifies each selected institute, logging any errors found.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        logger.log("verifying {}:".format(institute.raw_name))
        errors = []
        for verify in _VERIFIERS:
            verify(institute, errors)
        for error in errors:
            logger.log("... {}".format(error))
        logger.log_line()
def _main():
    """Main entry point.

    Creates a GitHub repo for every institute that does not already
    have one.

    """
    # Set repos for creation / deletion.
    existing = github.get_repos()
    wanted = {i.canonical_name for i in vocabs.get_institutes()}
    missing = wanted.difference(existing.keys())

    # Instantiate new repos.
    for name in missing:
        github.create_repo(name)
def _main(args):
    """Main entry point.

    Copies the parties spreadsheet template to each institute that does
    not already have one.

    """
    # Defensive programming.
    if not os.path.exists(args.xls_template):
        raise ValueError("XLS template file does not exist")

    # Write one file per institute.
    for institute in vocabs.get_institutes(args.institution_id):
        destination = io_mgr.get_parties_spreadsheet(institute)
        if os.path.exists(destination):
            continue
        shutil.copy(args.xls_template, destination)
def _main(args):
    """Main entry point.

    Writes a CIM document per (institution, model, domain) combination
    for which a document can be generated.

    """
    all_domains = vocabs.get_domains()
    all_topics = vocabs.get_topics()
    for institution in vocabs.get_institutes(args.institution_id):
        for model in vocabs.get_models_by_institution(institution):
            for domain in all_domains:
                document = _get_document(
                    institution, domain, model, all_topics)
                if document is None:
                    continue
                fpath = io_mgr.get_model_cim(institution, domain, model)
                pyesdoc.write(document, fpath)
                logger.log(
                    "CIM document written to -> {}".format(fpath))
def _main(args):
    """Main entry point.

    Initialises the CMIP5 -> CMIP6 mappings, then writes a JSON file for
    each CMIP6 institute | CMIP5 document combination.

    :param args: Parsed command line arguments (uses ``institution_id``;
        the value "all" selects every institute).

    """
    # Initialise CMIP5 to CMIP6 mappings.
    mappings.init()

    # Write a JSON file for each CMIP6 institute | CMIP5 document combination.
    for institution_id in vocabs.get_institutes():
        # Honour the institute filter ("all" selects every institute).
        # Idiomatic `not in` replaces the original `not x in [...]`.
        if args.institution_id not in ("all", institution_id.canonical_name):
            continue
        # Skip institutes with no CMIP5 documents to map.
        if not cmip5_documents.init(institution_id.canonical_name):
            continue
        for output in _yield_outputs(institution_id):
            output.save()
            logger.log('... {};'.format(output.fpath))
def _main(args):
    """Main entry point.

    Writes a customised machines spreadsheet, derived from the generic
    XLS template, for every selected institute, then moves it into the
    institute's directory.

    :param args: Parsed command line arguments (uses ``institution_id``
        and ``xls_template``).

    :raises ValueError: If the XLS template file does not exist.

    """
    # Defensive programming.
    if not os.path.exists(args.xls_template):
        raise ValueError("XLS template file does not exist")

    # Take generic template ready to process with institute-specific info.
    template_name = args.xls_template

    # Write out a customised template file for every institute.
    for institution in vocabs.get_institutes(args.institution_id):
        # !!! Must define this on each iteration instead of outside the 'for'
        # loop else iterations will include previous changes. !!!
        generic_template = load_workbook(filename=template_name)

        # Styles needed for clear, coordinated formatting of processed cells.
        generic_template.add_named_style(QUESTION_HEADER_STYLE)
        generic_template.add_named_style(QUESTION_INPUT_BOX_STYLE)

        # Customise the template appropriately to the given institute:
        # 1. Set the institute name
        set_institute_name_in_xls(institution, generic_template)
        # 2. Set the applicable CMIP6 models for this institute
        end_cell = set_applicable_models_in_xls(institution, generic_template)
        # 3. Set the applicable CMIP6 experiments for this institute
        tab_end_cell = set_applicable_experiments_in_xls(
            institution, generic_template, end_cell)

        # Add a note that the tab is done, with a reminder, for clarity.
        set_end_of_sheet_note(generic_template["Machine 1"], tab_end_cell + 2)

        # Close template and write the customised template to a new XLS file.
        generic_template.close()
        final_spreadsheet_name = "{}_{}_machines.xlsx".format(
            constants.CMIP6_MIP_ERA, institution.canonical_name)
        generic_template.save(final_spreadsheet_name)

        # Place the template into the appropriate directory, one file per
        # institute. (A leftover debug print of the paths was removed.)
        dest = io_mgr.get_machines_spreadsheet(institution)
        logger.log("moving xls file for {}".format(institution.raw_name))
        shutil.copy(final_spreadsheet_name, dest)
def _main(args):
    """Main entry point.

    Writes a CIM file per CMIP6 institute | source combination for
    which publication settings are switched 'on' and content exists.

    :param args: Parsed command line arguments (uses ``institution_id``).

    """
    # Write a CIM file per CMIP6 institute | source combination.
    for institute in vocabs.get_institutes(args.institution_id):
        # Escape if settings file not found.
        try:
            all_settings = io_mgr.load_model_settings(
                institute, _MODEL_PUBLICATION_FNAME)
        except IOError:
            warning = '{} model_publications.json not found'
            warning = warning.format(institute.canonical_name)
            logger.log_warning(warning)
            continue

        for source in vocabs.get_institute_sources(institute):
            # Escape if source settings undeclared.
            try:
                settings = all_settings[source.canonical_name]
            except KeyError:
                warning = '{} :: {} publication settings not found'
                warning = warning.format(
                    institute.canonical_name, source.canonical_name)
                logger.log_warning(warning)
                continue

            # Escape if no settings are switched 'on'.
            # Use the iterated value directly instead of re-indexing
            # the dict (original read settings[k] inside the filter).
            settings = {
                k: v for (k, v) in settings.items()
                if v['publish'] == 'on'
            }
            if not settings:
                continue

            # Generate content.
            content = _get_content(institute, source, settings)
            if content is None:
                warning = '{} :: {} CIM file not found'
                warning = warning.format(
                    institute.canonical_name, source.canonical_name)
                logger.log_warning(warning)
                continue

            # Write CIM file to fs.
            io_mgr.write_model_cim(institute, source, content)
def _main(args):
    """Main entry point.

    Writes a customised performance spreadsheet for every applicable
    (institution, machine, model) combination.

    """
    # Defensive programming.
    if not os.path.exists(args.xls_template):
        raise ValueError("XLS template file does not exist")

    # Generic template, ready to be specialised per institute.
    template_path = args.xls_template

    # Write out a customised template file for every institute.
    for institution in vocabs.get_institutes(args.institution_id):
        qs_to_inputs = get_all_qs_to_inputs_mapping_for_institute(
            institution)
        for machine, machine_json_map in qs_to_inputs.items():
            models_on_machine = get_applicable_models(machine_json_map)
            experiments = formatted_applicable_experiments(machine_json_map)
            for model in models_on_machine:
                # Open the template and customise it to the specific loop vars
                workbook = load_workbook(filename=template_path)
                customise_performance_template(
                    workbook, institution, machine, model, experiments)

                # Close template and save customised XLS to a new XLS file
                workbook.close()
                final_spreadsheet_name = (
                    "{}_performance_of_{}_on_{}_{}.xlsx".format(
                        constants.CMIP6_MIP_ERA,
                        model,
                        institution.canonical_name,
                        machine,
                    )
                )
                workbook.save(final_spreadsheet_name)

                # Place the file into the appropriate directory, ultimately
                # writing one file per machine and applicable model combination
                dest = io_mgr.get_performance_spreadsheet(
                    institution, machine, model)
                logger.log(
                    "moving xls file for {}".format(institution.raw_name))
                shutil.copy(final_spreadsheet_name, dest)
def _main(args):
    """Main entry point.

    Copies every existing CIM model document into the destination
    folder, renaming each file to the MD5 hex digest of its basename.

    :param args: Parsed command line arguments (uses ``dest``).

    :raises ValueError: If the destination folder does not exist.

    """
    if not os.path.exists(args.dest):
        raise ValueError("Destination folder is invalid")

    domains = vocabs.get_domains()
    for institution in vocabs.get_institutes():
        for model in vocabs.get_models_by_institution(institution):
            for domain in domains:
                fpath = io_mgr.get_model_cim(institution, domain, model)
                if not os.path.exists(fpath):
                    continue
                # hashlib.md5 requires bytes: encode the basename first
                # (the original passed a str, a TypeError under Python 3).
                fname = hashlib.md5(
                    os.path.basename(fpath).encode('utf-8')).hexdigest()
                shutil.copy(
                    fpath,
                    os.path.join(args.dest, '{}.json'.format(fname))
                )
def _main():
    """Main entry point.

    Logs the canonical name of every institute.

    """
    for institute in vocabs.get_institutes():
        logger.log(institute.canonical_name)
def _main(args):
    """Main entry point.

    Writes output for every selected institute.

    """
    for institute in vocabs.get_institutes(args.institution_id):
        _write(institute)