def create_folders(options, structures, publication_template, root=''):
    """Recursively mirror the nested ``structures`` mapping as a folder tree.

    Dict-valued entries become sub-directories (recursed into); any other
    value is assumed to be an ASE-writable object and is dumped to
    ``<key>.json`` via ``ase.io.write``.

    NOTE(review): this source was flattened onto one line, so the exact
    nesting of the statements below is reconstructed — confirm against the
    upstream cathub source.
    """
    out_format = 'json'
    # Keep OrderedDict key order when dumping the YAML metadata below;
    # `dict_representer` is defined elsewhere in this module.
    Dumper.add_representer(collections.OrderedDict, dict_representer)
    for key in structures:
        if isinstance(structures[key], dict):
            d = Path(root).joinpath(key)
            Path(d).mkdir(parents=True, exist_ok=True)
            # Only the level directly below the tree root gets the
            # publication metadata files.
            if Path(root).parent.as_posix() == '.':
                # Have to explicitly convert Path to str
                # to work under python 3.4
                with open(str(Path(root).joinpath('publication.txt')),
                          'w') as outfile:
                    yaml.dump(publication_template, outfile,
                              indent=4, Dumper=Dumper)
                if options.energy_corrections:
                    with open(
                            str(Path(root).joinpath(
                                'energy_corrections.txt')),
                            'w') as outfile:
                        yaml.dump(options.energy_corrections, outfile)
            # Recurse with an empty template so the metadata files are
            # written only once, at the top of the tree.
            create_folders(options, structures[key],
                           publication_template={}, root=d)
        else:
            ase.io.write(
                str(Path(root).joinpath(key + '.' + out_format)),
                structures[key],
                format=out_format,
            )
def __init__(cls, name, bases, kwds):
    """Wire YAML (de)serialization up for every concrete subclass.

    This overlaps quite a bit with YAMLObjectMetaclass: every class except
    the abstract ManagementObject base gets a module-derived YAML tag and
    is registered on the shared Loader/Dumper pair.
    """
    if name != "ManagementObject":
        tag = u"tag:yaml.org,2002:es.bsc.%s" % (cls.__module__)
        cls.yaml_loader = Loader
        cls.yaml_tag = tag  # used by `ManagementObject.to_yaml`
        logger.trace("YAML TAG : %s", tag)
        Loader.add_constructor(tag, cls.from_yaml)
        Dumper.add_representer(cls, cls.to_yaml)
    super(ManagementMetaClass, cls).__init__(name, bases, kwds)
def save_yaml_opts(path_yaml, opts):
    """Write *opts* to *path_yaml* as block-style YAML.

    Only a shallow copy of *opts* is taken, so nested containers are
    still shared with the caller.
    """
    shallow_options = copy.copy(opts)

    # Represent the project's Dict type as a plain mapping so PyYAML does
    # not emit python-object tags.
    # https://gist.github.com/oglops/c70fb69eef42d40bed06
    def represent_as_plain_dict(dumper, data):
        return dumper.represent_dict(data.items())

    Dumper.add_representer(Dict, represent_as_plain_dict)

    with open(path_yaml, 'w') as yaml_file:
        yaml.dump(shallow_options, yaml_file, Dumper=Dumper,
                  default_flow_style=False)
def print_yaml(disasm, stream=sys.stdout):
    """Emit *disasm* to *stream* as YAML with literal-block strings.

    Forcing every str scalar into '|' literal style keeps the output
    Git-friendly:
    https://stackoverflow.com/a/8641732
    """
    from yaml import Dumper

    def represent_literal_str(dumper, data):
        return dumper.represent_scalar(
            'tag:yaml.org,2002:str', data, style='|')

    emitter = Dumper(stream)
    emitter.add_representer(str, represent_literal_str)
    emitter.open()
    emitter.represent(disasm)
    emitter.close()
def make_folders(template, custom_base):
    """Create a basic folder tree for dumping DFT calculcations for reaction energies.

    Dear all

    Use this command make the right structure for your folders
    for submitting data for Catalysis Hub's Surface Reactions.

    Start by creating a template file by calling:

    $ cathub make_folders <template_name>

    Then open the template and modify it to so that it contains information
    about your data. You will need to enter publication/dataset information,
    and specify the types of surfaces, facets and reactions.

    The 'reactions' entry should include two lists for each reaction;
    'reactants' and 'products', corresponding to left- and right hand side
    of each chemical equation respectively. Remember to balance the equation
    by including a prefactor or minus sign in the name when relevant.
    For example:

    reactions:

    -   reactants: ['CCH3star@ontop']
        products: ['Cstar@hollow', 'CH3star@ontop']

    -   reactants: ['CH4gas', '-0.5H2gas', 'star']
        products: ['CH3star']

    Please include the phase of the species as an extension:

        'gas' for gas phase (i.e. CH4 -> CH4gas)

        'star' for empty slab or adsorbed phase. (i.e. OH -> OHstar)

    The site of adsorbed species is also included as an extension:

        '@site' (i.e. OHstar in bridge-> OHstar@bridge)

    Energy corrections to gas phase molecules can be included as:

        energy_corrections: {H2: 0.1, CH4: -0.15}

    Then, save the template and call:

    $ cathub make_folders <template_name>

    And folders will be created automatically.

    You can create several templates and call make_folders again
    if you, for example, are using different functionals or are
    doing different reactions on different surfaces.

    After creating your folders, add your output files from the
    electronic structure calculations at the positions.

    Accepted file formats include everything that can be read by ASE
    and contains the total potential energy of the calculation, such
    as .traj or .OUTCAR files.

    After dumping your files, run `cathub folder2db <your folder>`
    to collect the data.
    """
    # Preserve key order of the template when it is written out as YAML.
    def dict_representer(dumper, data):
        return dumper.represent_dict(data.items())

    Dumper.add_representer(collections.OrderedDict, dict_representer)

    if custom_base is None:
        custom_base = os.path.abspath(os.path.curdir)
    template = custom_base + '/' + template

    template_data = ase_tools.REACTION_TEMPLATE
    if not os.path.exists(template):
        # First invocation: write the template skeleton and stop so the
        # user can fill it in.
        with open(template, 'w') as outfile:
            outfile.write(
                yaml.dump(template_data, indent=4, Dumper=Dumper) + '\n')
        print(
            "Created template file: {template}\n".format(**locals()) +
            ' Please edit it and run the script again to create your folderstructure.\n' +
            ' Run cathub make_folders --help for instructions')
        return

    with open(template) as infile:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files; prefer yaml.safe_load here.
        template_data = yaml.load(infile)

    title = template_data['title']
    authors = template_data['authors']
    journal = template_data['journal']
    volume = template_data['volume']
    number = template_data['number']
    pages = template_data['pages']
    year = template_data['year']
    email = template_data['email']
    publisher = template_data['publisher']
    doi = template_data['doi']
    dft_code = template_data['DFT_code']
    dft_functionals = template_data['DFT_functionals']
    reactions = template_data['reactions']
    crystal_structures = template_data['crystal_structures']
    bulk_compositions = template_data['bulk_compositions']
    facets = template_data['facets']
    energy_corrections = template_data['energy_corrections']

    # NOTE(review): eval() on template file contents executes arbitrary
    # code; ast.literal_eval would be the safe replacement if only Python
    # literals ever occur here.
    make_folders_template.main(
        title=title,
        authors=eval(authors) if isinstance(
            authors, six.string_types) else authors,
        journal=journal,
        volume=volume,
        number=number,
        pages=pages,
        year=year,
        email=email,
        publisher=publisher,
        doi=doi,
        DFT_code=dft_code,
        DFT_functionals=dft_functionals,
        reactions=eval(reactions) if isinstance(
            reactions, six.string_types) else reactions,
        custom_base=custom_base,
        bulk_compositions=bulk_compositions,
        crystal_structures=crystal_structures,
        facets=facets,
        energy_corrections=energy_corrections)

    pub_id = tools.get_pub_id(title, authors, year)
    # Fixed: this message was a single string literal broken across a raw
    # newline (a SyntaxError); reconstructed as implicit concatenation.
    print("Now dump your DFT output files into the folder, "
          "and run 'cathub folder2db {pub_id}'".format(**locals()))
def make_folders(create_template, template, custom_base, diagnose):
    """Create a basic folder tree to put in DFT calculcations.

    Dear all

    Use this command make the right structure for your folders
    for submitting data for Catalysis Hub's Surface Reactions.

    Start by creating a template file by calling:

    $ cathub make_folders --create-template <template_name>

    Then open the template and modify it to so that it contains information
    about your data. You will need to enter publication/dataset information,
    and specify the types of surfaces, facets and reactions.

    The 'reactions' entry should include two lists for each reaction;
    'reactants' and 'products', corresponding to left- and right hand side
    of each chemical equation respectively. Remember to balance the equation
    by including a prefactor or minus sign in the name when relevant.
    For example:

    reactions:

    -   reactants: ['CCH3star@ontop']
        products: ['Cstar@hollow', 'CH3star@ontop']

    -   reactants: ['CH4gas', '-0.5H2gas', 'star']
        products: ['CH3star']

    Please include the phase of the species as an extension:

        'gas' for gas phase (i.e. CH4 -> CH4gas)

        'star' for empty slab or adsorbed phase. (i.e. OH -> OHstar)

    The site of adsorbed species is also included as an extension:

        '@site' (i.e. OHstar in bridge-> OHstar@bridge)

    Then, save the template and call:

    $ cathub make_folders <template_name>

    And folders will be created automatically.

    You can create several templates and call make_folders again
    if you, for example, are using different functionals or are
    doing different reactions on different surfaces.

    After creating your folders, add your output files from the
    electronic structure calculations at the positions.

    Accepted file formats include everything that can be read by ASE
    and contains the total potential energy of the calculation, such
    as .traj or .OUTCAR files.
    """
    # NOTE(review): `diagnose` is accepted but never used in this view;
    # kept for interface compatibility with the CLI wiring.

    # Preserve key order when the template is serialized to YAML.
    def dict_representer(dumper, data):
        return dumper.represent_dict(data.items())

    Dumper.add_representer(collections.OrderedDict, dict_representer)

    if custom_base is None:
        custom_base = os.path.abspath(os.path.curdir)

    # NOTE(review): OrderedDict built from a dict literal only preserves
    # insertion order on Python >= 3.7; pass a list of pairs for older
    # interpreters.
    template_data = collections.OrderedDict({
        'title': 'Fancy title',
        'authors': ['Doe, John', 'Einstein, Albert'],
        'journal': 'JACS',
        'volume': '1',
        'number': '1',
        'pages': '23-42',
        'year': '2017',
        'publisher': 'ACS',
        'doi': '10.NNNN/....',
        'DFT_code': 'Quantum Espresso',
        'DFT_functionals': ['BEEF-vdW', 'HSE06'],
        'reactions': [
            collections.OrderedDict({'reactants':
                                     ['2.0H2Ogas', '-1.5H2gas', 'star'],
                                     'products': ['OOHstar@top']}),
            collections.OrderedDict({'reactants': ['CCH3star@bridge'],
                                     'products':
                                     ['Cstar@hollow', 'CH3star@ontop']}),
            collections.OrderedDict({'reactants':
                                     ['CH4gas', '-0.5H2gas', 'star'],
                                     'products': ['CH3star@ontop']})
        ],
        'bulk_compositions': ['Pt'],
        'crystal_structures': ['fcc', 'hcp'],
        'facets': ['111']
    })

    if template is not None:
        if create_template:
            if os.path.exists(template):
                # Fixed: this message was a single string literal broken
                # across a raw newline (a SyntaxError); reconstructed on
                # one line.
                raise UserWarning(
                    "File {template} already exists. Refusing to overwrite"
                    .format(**locals()))
            with open(template, 'w') as outfile:
                outfile.write(
                    yaml.dump(
                        template_data,
                        indent=4,
                        Dumper=Dumper) + '\n')
            return
        else:
            with open(template) as infile:
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated and unsafe on untrusted files; prefer
                # yaml.safe_load.
                template_data = yaml.load(infile)

    title = template_data['title']
    authors = template_data['authors']
    journal = template_data['journal']
    volume = template_data['volume']
    number = template_data['number']
    pages = template_data['pages']
    year = template_data['year']
    publisher = template_data['publisher']
    doi = template_data['doi']
    dft_code = template_data['DFT_code']
    dft_functionals = template_data['DFT_functionals']
    reactions = template_data['reactions']
    crystal_structures = template_data['crystal_structures']
    bulk_compositions = template_data['bulk_compositions']
    facets = template_data['facets']

    # NOTE(review): eval() on template file contents executes arbitrary
    # code; ast.literal_eval would be the safe replacement if only Python
    # literals ever occur here.
    make_folders_template.main(
        title=title,
        authors=eval(authors) if isinstance(
            authors, six.string_types) else authors,
        journal=journal,
        volume=volume,
        number=number,
        pages=pages,
        year=year,
        publisher=publisher,
        doi=doi,
        DFT_code=dft_code,
        DFT_functionals=dft_functionals,
        reactions=eval(reactions) if isinstance(
            reactions, six.string_types) else reactions,
        custom_base=custom_base,
        bulk_compositions=bulk_compositions,
        crystal_structures=crystal_structures,
        facets=facets
    )
def dump(manifest):
    """Serialize *manifest* to a YAML string in block style."""
    # Register representers so OrderedDicts come out as plain mappings and
    # str scalars never pick up python-specific tags.
    for typ, representer in (
            (OrderedDict, dict_representer),
            (str, SafeRepresenter.represent_str)):
        Dumper.add_representer(typ, representer)
    return yaml.dump(manifest, Dumper=Dumper, default_flow_style=False)
# Attach a simple name/level/message formatter to the module's log handler.
formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)

### YAML + ORDERED DICT MAGIC
from collections import OrderedDict
import yaml
from yaml import Loader, Dumper
from yaml.representer import SafeRepresenter


# Dump OrderedDicts as plain YAML mappings (no python-specific tag).
# NOTE(review): dict.iteritems() exists only on Python 2; on Python 3 this
# raises AttributeError — confirm the module's target interpreter.
def dict_representer(dumper, data):
    return dumper.represent_dict(data.iteritems())


Dumper.add_representer(OrderedDict, dict_representer)


# Load every YAML mapping as an OrderedDict so key order survives a
# load/dump round trip.
def dict_constructor(loader, node):
    return OrderedDict(loader.construct_pairs(node))


Loader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                       dict_constructor)


def parse_arguments():
    # Build the CLI; further arguments may follow outside this view.
    parser = argparse.ArgumentParser(
        description="Sum training weights of classes in training dataset.")
    parser.add_argument("--era", required=True, help="Experiment era")
    parser.add_argument("--channel", required=True, help="Analysis channel")
import os
import sys
from git import RemoteProgress, Repo
from yaml import Dumper, Loader, dump, load


# Emit YAML nulls as empty strings instead of the literal 'null'.
def represent_none(self, _):
    return self.represent_scalar('tag:yaml.org,2002:null', '')


Dumper.add_representer(type(None), represent_none)

# Skeleton of the docker-compose document that gets filled in later.
composed = {
    'version': '3',
    'services': dict(),
    'volumes': dict(),
    'networks': dict(),
}


class ProgressPrinter(RemoteProgress):
    # GitPython progress callback: render one updating percentage line.
    def update(self, op_code, cur_count, max_count=None, message=''):
        print(
            f"\rProgress: {(cur_count / (max_count or 100.0))*100:4.4}% ",
            end='', flush=True)


def git_clone(git_repo: str, dir_name: str) -> None:
    # Clone git_repo into dir_name unless the directory already exists.
    # NOTE(review): the body is truncated in this view.
    if not os.path.exists(dir_name):
# NOTE(review): the matching `try:` (importing click) lies outside this view.
except ImportError:
    print("click not installed\n pip install click")
    sys.exit(0)

# https://gist.github.com/oglops/c70fb69eef42d40bed06
# Dump OrderedDicts as plain mappings; six.iteritems keeps py2/py3 compat.
def dict_representer(dumper, data):
    return dumper.represent_dict(six.iteritems(data))


# Load YAML mappings as OrderedDicts so key order is preserved.
def dict_constructor(loader, node):
    return OrderedDict(loader.construct_pairs(node))


Dumper.add_representer(OrderedDict, dict_representer)
Dumper.add_representer(str, SafeRepresenter.represent_str)


@click.group()
def buildout2doodba():
    # Root click command group; subcommands attach below.
    pass


@buildout2doodba.command()
@click.argument("buildout_configuration_file", nargs=1,
                type=click.Path(exists=True))
@click.argument("doodba_dir", nargs=1, type=click.Path(exists=True))
def convert_addons(buildout_configuration_file, doodba_dir):
    # NOTE(review): the body is truncated in this view.
logger = logging.getLogger(__name__)


# Dump OrderedDicts as plain YAML mappings.
def dict_representer(dumper, data):
    return dumper.represent_dict(data.items())


# Load YAML mappings as OrderedDicts to preserve key order.
def dict_constructor(loader, node):
    return OrderedDict(loader.construct_pairs(node))


# Emit None as an empty scalar rather than the literal 'null'.
def represent_none(self, data):
    return self.represent_scalar(u'tag:yaml.org,2002:null', u'')


Dumper.add_representer(OrderedDict, dict_representer)
Dumper.add_representer(type(None), represent_none)
# `_mapping_tag` is defined elsewhere in this module.
Loader.add_constructor(_mapping_tag, dict_constructor)
Dumper.add_representer(str, SafeRepresenter.represent_str)
Dumper.add_representer(dict, SafeRepresenter.represent_dict)
# Dumper.add_representer(unicode, SafeRepresenter.represent_unicode)

# Utility
def phex(p, llen=0):
    # NOTE(review): the body is truncated in this view.
""" Class description goes here. """ from uuid import UUID from yaml import Loader, Dumper JAVA_UUID_TAG = u'tag:yaml.org,2002:java.util.UUID' DATACLAY_ID_PREFIX = u'tag:yaml.org,2002:es.bsc.dataclay.util.ids' def uuid_representer(dumper, data): return dumper.represent_scalar(JAVA_UUID_TAG, str(data)) def uuid_constructor(loader, node): value = loader.construct_scalar(node) return UUID(value) Dumper.add_representer(UUID, uuid_representer) Loader.add_constructor(JAVA_UUID_TAG, uuid_constructor) Loader.add_multi_constructor( DATACLAY_ID_PREFIX, # This ignores the tag, as ImplementationID, OperationID, *ID are always # used directly as their UUID, not their specific type. lambda loader, _, node: uuid_constructor(loader, node))
from yaml import dump, load, FullLoader, YAMLError, Dumper
from letra import Label


def read(filepath):
    """Load and return the YAML document stored at *filepath*.

    Raises:
        ValueError: if the file is not valid YAML (chained to the
            original YAMLError so the parse location is preserved).
    """
    try:
        with open(filepath) as stream:
            return load(stream, Loader=FullLoader)
    except YAMLError as err:
        # Fixed: chain the parser error instead of discarding it, so the
        # traceback still shows where in the file parsing failed.
        raise ValueError(
            "Specified template file is not valid yaml") from err


def represent_label(dumper, data):
    """Dump a Label as a plain mapping of its public fields."""
    return dumper.represent_mapping(
        u"tag:yaml.org,2002:map",
        {
            "name": data.name,
            "description": data.description,
            "color": data.color,
        },
    )


Dumper.add_representer(Label, represent_label)


def write(labels, filepath):
    """Serialize *labels* to *filepath*, keeping field order as given."""
    with open(filepath, "w+") as stream:
        dump(labels, stream, Dumper=Dumper, sort_keys=False)