def load_model(self, model, name=None):
    """
    Loads the object info from data model content.

    Parameters
    ----------
    model : str or DataModelDict
        Model content or file path to model content.
    name : str, optional
        The name to assign to the record.  Not used by this class.
    """
    assert name is None, 'name is not used by this class'

    implementation = DM(model).find('implementation')

    # key is required; the remaining scalar fields default to None
    self.key = implementation['key']
    for field in ('id', 'status', 'date', 'type'):
        setattr(self, field, implementation.get(field, None))

    # Notes text is nested under a 'text' element when present
    self.notes = implementation['notes']['text'] if 'notes' in implementation else None

    # Rebuild the subelement lists from the model content
    self.artifacts = []
    for subelement in implementation.iteraslist('artifact'):
        self.add_artifact(model=DM([('artifact', subelement)]))

    self.parameters = []
    for subelement in implementation.iteraslist('parameter'):
        self.add_parameter(model=DM([('parameter', subelement)]))

    self.links = []
    for subelement in implementation.iteraslist('link'):
        self.add_link(model=DM([('link', subelement)]))
def load(self, model):
    """
    Loads the object info from data model content.

    Parameters
    ----------
    model : str or DataModelDict
        Model content or file path to model content.  May also be raw
        bibtex text, which is detected by the DM parse failing.
    """
    # Check if model is data model content; otherwise treat it as raw bibtex
    # (narrowed from a bare except: only parse failures should fall through)
    try:
        model = DM(model)
    except Exception:
        bibtex = model
    else:
        bibtex = model.find('bibtex')

    # Clear out any previously set attributes
    for key in self.asdict():
        delattr(self, key)

    # Parse and extract content
    parser = BibTexParser()
    parser.customization = convert_to_unicode
    bib_database = bibtexparser.loads(bibtex, parser=parser)
    assert len(bib_database.entries) == 1, 'bibtex must be for a single reference'
    bibdict = bib_database.entries[0]
    for key, value in bibdict.items():
        setattr(self, key, value)
def load_parameters(self, input_dict):
    """
    Interprets calculation parameters.

    Parameters
    ----------
    input_dict : dict
        Dictionary containing input parameter key-value pairs.

    Raises
    ------
    ValueError
        If neither a gammasurface file nor its content is given.
    """
    keymap = self.keymap

    # Pull the file path and optional pre-loaded content from the inputs
    self.gammasurface_file = input_dict.get(keymap['gammasurface_file'], None)
    self.__gammasurface_content = input_dict.get(keymap['gammasurface_content'],
                                                 None)

    # Content, when given, takes precedence over the file path
    if self.gammasurface_content is not None:
        gamma_source = self.gammasurface_content
    else:
        gamma_source = self.gammasurface_file

    if gamma_source is None:
        raise ValueError('gammasurface_file is required')

    # Parse the gamma surface model and pull out its calculation key
    g_model = DM(gamma_source)
    self.__gamma = g_model
    self.__calc_key = g_model.finds('key')[0]
def add_tar(database_info, record=None, name=None, style=None, root_dir=None):
    """
    Archives a calculation folder as a tar.gz file, uploads it to the
    database, and records the archive url in the record content.

    Parameters
    ----------
    database_info
        The database access object (must provide add_file).
    record : optional
        The record to archive.  Cannot be combined with name/style.
    name : str, optional
        Record name used to look the record up when record is not given.
    style : str, optional
        Record style used to look the record up when record is not given.
    root_dir : str, optional
        Root directory containing the calculation folder to archive.

    Raises
    ------
    TypeError
        If record is given together with name or style.
    ValueError
        If the record already has an archive.
    """
    # Create Record object if not given
    if record is None:
        record = get_record(database_info, name=name, style=style)

    # Issue a TypeError for competing kwargs
    elif style is not None or name is not None:
        raise TypeError('kwargs style and name cannot be given with kwarg record')

    # Verify that record exists
    else:
        record = get_record(database_info, name=record.name, style=record.style)

    # Check if an archive already exists
    model = DM(record.content)
    if len(model.finds('archive')) > 0:
        raise ValueError('Record already has an archive')

    # Make archive
    shutil.make_archive(record.name, 'gztar', root_dir=root_dir,
                        base_dir=record.name)

    # Upload archive.
    # BUGFIX: dict views are not indexable in Python 3 -- use next(iter())
    root_key = next(iter(model.keys()))
    model[root_key]['archive'] = DM()
    model[root_key]['archive']['url'] = database_info.add_file(record.name + '.tar.gz')

    update_record(database_info, name=record.name, style=record.style,
                  content=model.xml())

    # Remove local archive copy
    os.remove(record.name + '.tar.gz')
def load(self, model):
    """Loads request info from a data model containing a 'request' branch."""
    request = DM(model).find('request')

    # date is required; comment is optional
    self.date = request['date']
    self.comment = request.get('comment', None)

    # Rebuild the list of systems from the model content
    self.__systems = []
    for sys_model in request.aslist('system'):
        self.add_system(model=DM([('system', sys_model)]))
def load_model(self, model, name=None):
    """
    Loads record contents from a given model.

    Parameters
    ----------
    model : str or DataModelDict
        The model contents of the record to load.
    name : str, optional
        The name to assign to the record.  Often inferred from other
        attributes if not given.
    """
    super().load_model(model, name=name)
    request = DM(model).find('request')

    self.date = request['date']
    self.comment = request.get('comment', None)

    # Rebuild the list of systems from the model content
    self.__systems = []
    for sys_model in request.aslist('system'):
        self.add_system(model=DM([('system', sys_model)]))

    if name is not None:
        self.name = name
    else:
        # Default name: the request date followed by all system elements
        elements = []
        for system in self.systems:
            elements.extend(system.elements)
        self.name = f'{self.date} {" ".join(elements)}'
def load(self, model):
    """
    Loads the object info from data model content.

    Parameters
    ----------
    model : str or DataModelDict
        Model content or file path to model content.
    """
    implementation = DM(model).find('implementation')

    # key is required; the remaining scalar fields default to None
    self.key = implementation['key']
    for field in ('id', 'status', 'date', 'type'):
        setattr(self, field, implementation.get(field, None))

    # Notes text is nested under a 'text' element when present
    self.notes = implementation['notes']['text'] if 'notes' in implementation else None

    # Rebuild the subelement lists from the model content
    self.artifacts = []
    for subelement in implementation.iteraslist('artifact'):
        self.add_artifact(model=DM([('artifact', subelement)]))

    self.parameters = []
    for subelement in implementation.iteraslist('parameter'):
        self.add_parameter(model=DM([('parameter', subelement)]))

    self.weblinks = []
    for subelement in implementation.iteraslist('web-link'):
        self.add_weblink(model=DM([('web-link', subelement)]))
def load(self, model):
    """Loads action info from a data model containing an 'action' branch."""
    action = DM(model).find('action')

    # date and type are required; comment is optional
    self.date = action['date']
    self.type = action['type']
    self.comment = action.get('comment', None)

    # Rebuild the list of linked potentials from the model content
    self.__potentials = []
    for pot_model in action.aslist('potential'):
        self.potentials.append(PotInfo(DM([('potential', pot_model)])))
def content(self, value):
    """Setter: validates that content has exactly the expected root element."""
    if value is None:
        self.__content = None
        return

    value = DM(value)
    # The content must have a single root element matching contentroot
    if len(value.keys()) != 1 or self.contentroot not in value:
        raise ValueError('Invalid root element for content')
    self.__content = DM(value)
def buildpaircoeff(self):
    """Builds the pair_coeff command lines"""
    if self.paramfile is None:
        raise ValueError('paramfile must be set')

    # pair_coeff takes the parameter file followed by the symbols flag
    paircoeff = DM()
    for key, val in (('file', self.paramfile), ('symbols', 'True')):
        paircoeff.append('term', DM([(key, val)]))

    return paircoeff
def unset_run_directory(name=None):
    """
    Deletes the settings for a pre-defined run_directory from the settings
    file.

    Parameters
    ----------
    name : str
        The name assigned to a pre-defined run_directory.  If not given,
        the user is interactively prompted to select one.

    Raises
    ------
    ValueError
        If the named run_directory is not found in the settings.
    """
    # Get information from settings file
    settings = load_settings()
    run_directories = settings['iprPy-defined-parameters'].aslist(
        'run_directory')

    # Ask for name if not given
    if name is None:
        if len(run_directories) > 0:
            print('Select a run_directory:')
            for i, run_directory in enumerate(run_directories):
                print(i + 1, run_directory['name'])
            choice = screen_input(':')
            # A numeric entry selects by index; any other entry is the name
            # (narrowed from a bare except: int() failure is a ValueError)
            try:
                choice = int(choice)
            except ValueError:
                name = choice
            else:
                name = run_directories[choice - 1]['name']
        else:
            print('No run_directories currently set')
            sys.exit()

    # Verify listed name exists
    else:
        try:
            run_directory_settings = settings.find('run_directory',
                                                   yes={'name': name})
        except Exception:
            raise ValueError('run_directory ' + name + ' not found')
        print('run_directory', name, 'found')

    test = screen_input('Delete settings? (must type yes):')
    if test == 'yes':
        if len(run_directories) == 1:
            # Remove the whole element when it holds the only run_directory
            del settings['iprPy-defined-parameters']['run_directory']
        else:
            # Rebuild the list without the named run_directory
            new = DM()
            for run_directory in run_directories:
                if run_directory['name'] != name:
                    new.append('run_directory', run_directory)
            settings['iprPy-defined-parameters']['run_directory'] = new[
                'run_directory']

        save_settings(settings)
        print('Settings for run_directory', name, 'successfully deleted')
def buildpairstyle(self):
    """Builds the pair_style command line."""
    pairstyle = DM()
    pairstyle['type'] = self.pair_style

    for term in self.pair_style_terms:
        # Numeric terms are parameters; everything else is an option string
        if isinstance(term, (int, float)):
            entry = DM([('parameter', term)])
        else:
            entry = DM([('option', str(term))])
        pairstyle.append('term', entry)

    return pairstyle
def load(self, model, citations=None):
    """
    Load a Potential model into the Potential class.

    Parameters
    ----------
    model : str or DataModelDict
        Model content or file path to model content.
    citations : list, optional
        Citation objects to associate with the potential.
    """
    # Set given citations objects
    self.citations = citations

    # Load model
    model = DM(model)
    potential = model.find('interatomic-potential')

    # Extract information
    self.key = potential['key']
    self.recorddate = potential['record-version']

    description = potential['description']
    # NOTE: the original code first read developers/year from the potential
    # root and then immediately overwrote them from description -- the dead
    # duplicate reads have been removed.
    self.developers = description.get('developers', None)
    self.year = description.get('year', None)
    self.modelname = description.get('model-name', None)

    self.dois = [citation['DOI']
                 for citation in description.iteraslist('citation')]

    # Notes text is nested under a 'text' element when present
    if 'notes' in description:
        self.notes = description['notes']['text']
    else:
        self.notes = None

    # Element info: fictional elements and real elements are exclusive
    felements = potential.aslist('fictional-element')
    oelements = potential.aslist('other-element')
    elements = potential.aslist('element')

    if len(felements) > 0:
        assert len(elements) == 0
        self.fictional = True
        self.elements = felements
    else:
        assert len(elements) > 0
        self.fictional = False
        self.elements = elements

    # At most one other-element is allowed
    if len(oelements) > 0:
        assert len(oelements) == 1
        self.othername = oelements[0]
    else:
        self.othername = None

    # Warn when the record's id differs from the id built from attributes
    if self.id != potential['id']:
        print(f"Different ids: {self.id} != {potential['id']} {self.key}")
def build_model(self, model):
    """
    Adds a 'free-surfaces' branch to model summarizing surface formation
    energies grouped by composition, prototype and lattice constant.
    """
    if self.exists is not True:
        return

    model['free-surfaces'] = fs_model = DM()

    for composition in np.unique(self.data.composition):
        comp_results = self.data[self.data.composition == composition]

        # Build PotentialProperties data
        comp_model = DM()
        comp_model['composition'] = composition

        for prototype in np.unique(comp_results.prototype):
            proto_results = comp_results[comp_results.prototype == prototype]
            proto_model = DM()
            proto_model['prototype'] = prototype

            for alat in np.unique(proto_results.a):
                alat_records = proto_results[proto_results.a == alat]
                alat_model = DM()
                alat_model['a'] = alat

                # Measurements listed in order of increasing formation energy
                for idx in alat_records.sort_values(['E_f']).index:
                    series = alat_records.loc[idx]
                    measurement = DM([('surface', series.surface),
                                      ('energy', series.E_f)])
                    alat_model.append('measurement', measurement)

                proto_model.append('alats', alat_model)
            comp_model.append('prototypes', proto_model)
        fs_model.append('compositions', comp_model)
def unset_run_directory(name=None):
    """
    Deletes the settings for a pre-defined run_directory from the settings
    file.

    Parameters
    ----------
    name : str
        The name assigned to a pre-defined run_directory.  If not given,
        the user is interactively prompted to select one.

    Raises
    ------
    ValueError
        If the named run_directory is not found in the settings.
    """
    # Get information from settings file
    settings = load_settings()
    run_directories = settings['iprPy-defined-parameters'].aslist('run_directory')

    # Ask for name if not given
    if name is None:
        if len(run_directories) > 0:
            print('Select a run_directory:')
            for i, run_directory in enumerate(run_directories):
                print(i+1, run_directory['name'])
            choice = screen_input(':')
            # A numeric entry selects by index; any other entry is the name
            # (narrowed from a bare except: int() failure is a ValueError)
            try:
                choice = int(choice)
            except ValueError:
                name = choice
            else:
                name = run_directories[choice-1]['name']
        else:
            print('No run_directories currently set')
            sys.exit()

    # Verify listed name exists
    else:
        try:
            run_directory_settings = settings.find('run_directory',
                                                   yes={'name': name})
        except Exception:
            raise ValueError('run_directory ' + name + ' not found')
        print('run_directory', name, 'found')

    test = screen_input('Delete settings? (must type yes):')
    if test == 'yes':
        if len(run_directories) == 1:
            # Remove the whole element when it holds the only run_directory
            del settings['iprPy-defined-parameters']['run_directory']
        else:
            # Rebuild the list without the named run_directory
            new = DM()
            for run_directory in run_directories:
                if run_directory['name'] != name:
                    new.append('run_directory', run_directory)
            settings['iprPy-defined-parameters']['run_directory'] = new['run_directory']

        save_settings(settings)
        print('Settings for run_directory', name, 'successfully deleted')
def buildpaircoeff(self):
    """Builds the pair_coeff command lines."""
    # Ordered terms: library file, symbol list, parameter file, symbols flag
    terms = (('file', self.libfile),
             ('option', self.symbollist),
             ('file', self.paramfile),
             ('symbols', 'True'))

    paircoeff = DM()
    for key, val in terms:
        paircoeff.append('term', DM([(key, val)]))

    return paircoeff
def unset_database(name=None):
    """
    Deletes the settings for a pre-defined database from the settings file.

    Parameters
    ----------
    name : str
        The name assigned to a pre-defined database.  If not given, the
        user is interactively prompted to select one.

    Raises
    ------
    ValueError
        If the named database is not found in the settings.
    """
    # Get information from settings file
    settings = load_settings()
    databases = settings['iprPy-defined-parameters'].aslist('database')

    # Ask for name if not given
    if name is None:
        if len(databases) > 0:
            print('Select a database:')
            for i, database in enumerate(databases):
                print(i + 1, database['name'])
            choice = screen_input(':')
            # A numeric entry selects by index; any other entry is the name
            # (narrowed from a bare except: int() failure is a ValueError)
            try:
                choice = int(choice)
            except ValueError:
                name = choice
            else:
                name = databases[choice - 1]['name']
        else:
            print('No databases currently set')
            return None

    # Verify listed name exists
    else:
        try:
            settings.find('database', yes={'name': name})
        except Exception:
            raise ValueError(f'Database {name} not found')
        print(f'Database {name} found')

    test = screen_input('Delete settings? (must type yes):')
    if test == 'yes':
        if len(databases) == 1:
            # Remove the whole element when it holds the only database
            del settings['iprPy-defined-parameters']['database']
        else:
            # Rebuild the list without the named database
            new = DM()
            for database in databases:
                if database['name'] != name:
                    new.append('database', database)
            settings['iprPy-defined-parameters']['database'] = new['database']

        save_settings(settings)
        print(f'Settings for database {name} successfully deleted')
def load_model(self, model):
    """
    Loads record contents from a given model.

    Parameters
    ----------
    model : str or DataModelDict
        The model contents of the record to load.
    """
    system = DM(model).find('system')

    # Both fields are optional and default to None
    self.formula = system.get('chemical-formula', None)
    self.elements = system.get('element', None)
def buildcommands(self):
    """Builds the data model elements for the extra command lines."""
    commands = []
    for line in self.command_terms:
        # Skip empty command lines
        if not line:
            continue

        command = DM()
        for term in line:
            # Numeric terms are parameters; everything else is an option
            if isinstance(term, (int, float)):
                entry = DM([('parameter', term)])
            else:
                entry = DM([('option', str(term))])
            command.append('term', entry)
        commands.append(command)

    return commands
def __init__(self, potential):
    """
    Builds a PotInfo component class.

    Parameters
    ----------
    potential : Potential, str or DataModelDict
        A Potential record object or DataModelDict contents for a Potential
        record.  This provides the information to link the Potential to the
        Action.
    """
    if isinstance(potential, Potential.Potential):
        # Extract relevant properties from the Potential object
        self.__id = potential.id
        self.__key = potential.key
        self.__dois = []
        for citation in potential.citations:
            # Best-effort: skip citations without an accessible doi
            # (narrowed from a bare except)
            try:
                self.__dois.append(citation.doi)
            except Exception:
                pass
        self.__fictional = potential.fictional
        self.__elements = potential.elements
        self.__othername = potential.othername

    else:
        # Extract relevant properties from potential record contents
        model = DM(potential).find('potential')
        self.__id = model['id']
        self.__key = model['key']
        self.__dois = model.aslist('doi')

        felements = model.aslist('fictional-element')
        oelements = model.aslist('other-element')
        elements = model.aslist('element')

        # Fictional elements and real elements are mutually exclusive
        if len(felements) > 0:
            assert len(elements) == 0
            self.__fictional = True
            self.__elements = felements
        else:
            assert len(elements) > 0
            self.__fictional = False
            self.__elements = elements

        # At most one other-element is allowed
        if len(oelements) > 0:
            assert len(oelements) == 1
            self.__othername = oelements[0]
        else:
            self.__othername = None
def read_input(fname):
    """
    Interpret input file into DataModelDict.

    Each non-blank, non-comment line is split into a key (first term) and
    values (remaining terms).  Keys with no value map to None; repeated
    keys accumulate values via DataModelDict.append.

    Parameters
    ----------
    fname : str
        Path to the input file.

    Returns
    -------
    DataModelDict
        The parsed key-value data.
    """
    with open(fname) as f:
        input_dict = DataModelDict()
        for line in f:
            terms = line.split()
            # Skip blank lines and lines starting with '#'
            if len(terms) != 0 and terms[0][0] != '#':
                if len(terms) == 1:
                    input_dict[terms[0]] = None
                else:
                    # BUGFIX: xrange is Python 2 only; iterate the slice
                    for value in terms[1:]:
                        input_dict.append(terms[0], value)
    return input_dict
def __is_new_record(record_dir, v_dict):
    """
    Check if a matching record already exists.

    Parameters
    ----------
    record_dir : str
        Directory containing existing record files (.xml/.json).
    v_dict : dict
        Variable dictionary with 'load', 'ptd_name' and 'size_mults' terms.

    Returns
    -------
    bool
        True if no existing record matches, False otherwise.
    """
    # Narrowed from a bare except: only filesystem failures mean the
    # directory is missing and needs creating
    try:
        flist = os.listdir(record_dir)
    except OSError:
        os.makedirs(record_dir)
        return True

    is_new = True
    for fname in flist:
        if os.path.splitext(fname)[1] in ['.xml', '.json']:
            with open(os.path.join(record_dir, fname)) as f:
                record = DM(f)

            # Compare the loaded system file
            sys_file = record.find('system-info')['artifact']['file']
            load_file = ' '.join(v_dict['load'].split()[1:])
            if sys_file != load_file:
                continue

            # Compare the point defect name
            ptd_name = record.find('point-defect')['identifier']['name']
            if ptd_name != v_dict['ptd_name']:
                continue

            # Compare the system size multipliers
            a_mult = record.find('a-multiplyer')
            b_mult = record.find('b-multiplyer')
            c_mult = record.find('c-multiplyer')

            mults = v_dict['size_mults'].split()
            if len(mults) == 3:
                # Three terms: magnitudes in each direction
                a_m = abs(int(mults[0]))
                b_m = abs(int(mults[1]))
                c_m = abs(int(mults[2]))
            elif len(mults) == 6:
                # Six terms: min/max pairs; the multiplier is the span
                a_m = int(mults[1]) - int(mults[0])
                b_m = int(mults[3]) - int(mults[2])
                c_m = int(mults[5]) - int(mults[4])
            else:
                raise ValueError('Invalid size_mults term')

            if (a_m, b_m, c_m) == (a_mult, b_mult, c_mult):
                is_new = False
                break

    return is_new
def load(self, model, pot_dir=None):
    """
    Loads LAMMPS-potential data model info.

    Arguments:
    model -- a string or file-like object of a json/xml data model
             containing a LAMMPS-potential branch.
    pot_dir -- (optional) the directory location of any artifacts
               associated with the potential.
    """
    self.__dm = DataModelDict(model).find('LAMMPS-potential')

    for atom in self.__dm.iteraslist('atom'):
        # If element is missing, both symbol and mass must be given
        if 'element' not in atom:
            if 'symbol' in atom and 'mass' in atom:
                atom['element'] = atom['symbol']
            else:
                raise KeyError("Error reading Potential's atomic info: mass and symbol are needed if element is not given!")

        # Default symbol to element when not listed
        if 'symbol' not in atom:
            atom['symbol'] = atom['element']

        # Default mass to the element's standard value when not listed
        if 'mass' not in atom:
            atom['mass'] = atomic_mass(atom['element'])

        assert isinstance(atom['mass'], float), 'Mass needs to be a number!'

    # pot_dir defaults to the current directory
    self.pot_dir = pot_dir if pot_dir is not None else ''
def buildpaircoeff(self):
    """Builds the pair_coeff command lines."""

    def term_model(term):
        # Numeric terms are parameters; everything else becomes an option
        if isinstance(term, (int, float)):
            return DM([('parameter', term)])
        return DM([('option', str(term))])

    # Universal interactions: ignore symbols
    if len(self.interactions) == 1 and 'symbols' not in self.interactions[0]:
        paircoeff = DM()
        for term in self.interactions[0]['terms']:
            paircoeff.append('term', term_model(term))
        return paircoeff

    # Interactions with symbols
    paircoeffs = []

    # Verify correct number of interactions: one per unordered symbol pair
    potsymbols = self.symbols if self.symbols is not None else self.elements
    expected = comb(len(potsymbols), 2, exact=True, repetition=True)
    if len(self.interactions) != expected:
        raise ValueError(
            f'Not all interactions set: expected {expected}, found {len(self.interactions)}'
        )

    # Build
    for interaction in self.interactions:
        paircoeff = DM()
        paircoeff['interaction'] = DM([('symbol', interaction['symbols'])])
        for term in interaction['terms']:
            paircoeff.append('term', term_model(term))
        paircoeffs.append(paircoeff)

    # A single pair_coeff is returned unwrapped
    if len(paircoeffs) == 1:
        paircoeffs = paircoeffs[0]
    return paircoeffs
def buildpaircoeff(self):
    """Builds the pair_coeff command lines"""
    if self.libfile is None:
        raise ValueError('libfile must be set')

    # A missing parameter file is passed to LAMMPS as NULL
    if self.paramfile is None:
        paramfile = 'NULL'
    else:
        paramfile = self.paramfile

    paircoeff = DM()
    paircoeff.append('term', DM([('file', self.libfile)]))
    paircoeff.append('term', DM([('option', self.symbollist)]))
    paircoeff.append('term', DM([('file', paramfile)]))
    # CONSISTENCY FIX: the sibling buildpaircoeff implementations use the
    # string 'True' rather than the boolean True; matching them keeps the
    # generated data model serialization uniform.
    paircoeff.append('term', DM([('symbols', 'True')]))

    return paircoeff
def model(self, **kwargs): """ Return or set DataModelDict representation of the gamma surface. Parameters ---------- model : str, file-like object or DataModelDict, optional XML/JSON content to extract gamma surface energy from. If not given, model content will be generated. length_unit : str, optional Units to give length values in. Default is Angstrom. energy_unit : str, optional units to give energy values in. Default is eV. Returns ------- DataModelDict A dictionary containing the stacking fault data of the GammaSurface object. """ # Set values if model given if 'model' in kwargs: assert len( kwargs ) == 1, 'no keyword arguments supported with model reading' model = DM(kwargs['model']) # Read in shiftvectors, i.e., a1 and a2 self.a1vect = np.array(model.find('shiftvector1').split(), dtype=float) self.a2vect = np.array(model.find('shiftvector2').split(), dtype=float) # Read in stacking fault data gsf = model.find('stacking-fault-relation') data = OrderedDict() data['a1'] = gsf['shift-vector-1-fraction'] data['a2'] = gsf['shift-vector-2-fraction'] data['E_gsf'] = uc.value_unit(gsf['energy']) data['delta'] = uc.value_unit(gsf['plane-separation']) self.__data = pd.DataFrame(data) self.fit()
def get_tar(database_info, record=None, name=None, style=None):
    """Retrieves a stored calculation tar archive as an open tarfile."""
    # Build the Record object from name/style when not supplied directly
    if record is None:
        record = get_record(database_info, name=name, style=style)

    # Competing kwargs are ambiguous
    elif style is not None or name is not None:
        raise TypeError('kwargs style and name cannot be given with kwarg record')

    # Re-fetch to verify that the record exists
    else:
        record = get_record(database_info, name=record.name, style=record.style)

    # Extract url to tar file and stream its content back as a tarfile
    url = DM(record.content).find('archive')['url']
    return tarfile.open(fileobj=BytesIO(database_info.get_file(url)))
def __extract_model_terms(self):
    """Extracts family, symbols and composition values from load_file if needed"""
    # Check for file and contents
    if self.load_content is not None:
        load_file = self.load_content
    elif self.load_file is not None:
        load_file = self.load_file.as_posix()
    else:
        raise ValueError('load_file not set')

    # Try to extract info from system_model files
    if self.load_style == 'system_model':
        try:
            model = DM(load_file).finds(f'{self.modelprefix}system-info')[0]
        except Exception:
            # Best-effort: leave values unset when the model can't be parsed
            pass
        else:
            # Extract family value or set as load_file's name
            if self.family is None:
                self.family = model.get('family', Path(self.load_file).stem)
            if self.symbols is None:
                symbols = model.get('symbol', None)
                if symbols is not None and len(symbols) > 0:
                    self.symbols = symbols
            if self.composition is None:
                self.composition = model.get('composition', None)

    # Try to extract info from other files
    else:
        if self.family is None:
            self.family = Path(self.load_file).stem
        if self.symbols is None:
            # BUGFIX: the extracted symbols were previously assigned to a
            # local and discarded, and a dangling no-op 'self.composition'
            # expression followed; assign the unit cell's symbols as the
            # surrounding code clearly intended.
            self.symbols = self.ucell.symbols
def buildpaircoeff(self):
    """Builds the pair_coeff command lines"""
    if self.paramfile is None:
        raise ValueError('paramfile must be set')

    # Ordered terms: symbol list, parameter file, symbols flag
    terms = (('option', self.symbollist),
             ('file', self.paramfile),
             ('symbols', 'True'))

    paircoeff = DM()
    for key, val in terms:
        paircoeff.append('term', DM([(key, val)]))

    return paircoeff
def build_model(self, model):
    """
    Adds an 'elastic-constants' branch to model summarizing Cij measurements
    grouped by composition, prototype and lattice constant.
    """
    if self.exists is not True:
        return

    model['elastic-constants'] = ec_model = DM()

    for composition in np.unique(self.data.composition):
        comp_results = self.data[self.data.composition == composition]

        # Build PotentialProperties data
        comp_model = DM()
        comp_model['composition'] = composition

        for prototype in np.unique(comp_results.prototype):
            proto_results = comp_results[comp_results.prototype == prototype]
            proto_model = DM()
            proto_model['prototype'] = prototype

            for alat in np.unique(proto_results.a):
                alat_records = proto_results[proto_results.a == alat]
                alat_model = DM()
                alat_model['a'] = alat

                sortkeys = ['strainrange', 'straindirection']
                for idx in alat_records.sort_values(sortkeys).index:
                    series = alat_records.loc[idx]
                    measurement = DM()

                    # Encode the strain as a signed magnitude string
                    if series.straindirection == 'positive':
                        measurement['strain'] = '+%s' % series.strainrange
                    elif series.straindirection == 'negative':
                        measurement['strain'] = '-%s' % series.strainrange
                    else:
                        raise ValueError('Unknown straindirection')

                    # Flatten the 6x6 Cij matrix into C11..C66 fields
                    for row in range(6):
                        for col in range(6):
                            measurement[f'C{row+1}{col+1}'] = series.Cij[row, col]

                    alat_model.append('measurement', measurement)
                proto_model.append('alats', alat_model)
            comp_model.append('prototypes', proto_model)
        ec_model.append('compositions', comp_model)
def __init__(self, potential):
    """Initializes PotInfo from a Potential object or a Potential record model."""
    if isinstance(potential, Potential):
        # Pull the needed fields straight off the Potential object
        self.__id = potential.id
        self.__key = potential.key
        self.__dois = [citation.doi for citation in potential.citations]
        self.__fictional = potential.fictional
        self.__elements = potential.elements
        self.__othername = potential.othername

    elif isinstance(potential, DM):
        # Parse the needed fields out of the record contents
        model = DM(potential).find('potential')
        self.__id = model['id']
        self.__key = model['key']
        self.__dois = model.aslist('doi')

        felements = model.aslist('fictional-element')
        oelements = model.aslist('other-element')
        elements = model.aslist('element')

        # Fictional elements and real elements are mutually exclusive
        if len(felements) > 0:
            assert len(elements) == 0
            self.__fictional = True
            self.__elements = felements
        else:
            assert len(elements) > 0
            self.__fictional = False
            self.__elements = elements

        # At most one other-element is allowed
        if len(oelements) > 0:
            assert len(oelements) == 1
            self.__othername = oelements[0]
        else:
            self.__othername = None

    else:
        raise TypeError('Invalid potential content')
def main(*args):
    """
    Resets calculations: extracts archived calculation folders back into the
    run directory, removes leftover bid/results files, and flips records with
    'error' status back to 'not calculated' in both directories.
    """
    # Read in input script terms
    run_directory, lib_directory = __initial_setup(*args)

    # Extract all archived calculation folders into the run directory
    for simmy in glob.iglob(os.path.join(lib_directory, '*', '*', '*', '*',
                                         '*.tar.gz')):
        try:
            # Context manager guarantees the tarfile is closed even on
            # failure (the old code could double-close or NameError here)
            with tarfile.open(simmy, 'r:gz') as tarry:
                tarry.extractall(run_directory)
            os.remove(simmy)
        except Exception:
            # BUGFIX: Python 2 print statement converted to a function call
            print('Failed to extract', simmy)

    # Remove leftover bid and results files
    for biddy in glob.iglob(os.path.join(run_directory, '*', '*.bid')):
        os.remove(biddy)
    for result in glob.iglob(os.path.join(run_directory, '*', 'results.json')):
        os.remove(result)

    # Reset errored records in the run directory
    for record in glob.iglob(os.path.join(run_directory, '*', '*.json')):
        __reset_error_record(record)

    # Reset errored records in the library directory
    for record in glob.iglob(os.path.join(lib_directory, '*', '*', '*', '*',
                                          '*.json')):
        __reset_error_record(record)


def __reset_error_record(record):
    """Best-effort: flips a record's 'error' status back to 'not calculated'."""
    with open(record) as f:
        model = DM(f)
    # BUGFIX: dict views are not indexable in Python 3 -- use next(iter())
    key = next(iter(model.keys()))
    try:
        if model[key]['status'] == 'error':
            model[key]['status'] = 'not calculated'
            del model[key]['error']
            with open(record, 'w') as f:
                model.json(fp=f, indent=4)
    except Exception:
        # Records without a status element are left untouched
        pass
def load(self, model, pot_dir=None):
    """
    Loads potential-LAMMPS data model info.

    Arguments:
    model -- a string or file-like object of a json/xml data model
             containing a potential-LAMMPS branch.
    pot_dir -- (optional) the directory location of any artifacts
               associated with the potential.
    """
    # Load model and find potential-LAMMPS
    if isinstance(model, DM):
        self.__dm = model.find('potential-LAMMPS')
    else:
        with uber_open_rmode(model) as f:
            self.__dm = DM(f).find('potential-LAMMPS')

    for atom in self.__dm.iteraslist('atom'):
        # If element is missing, both symbol and mass must be given
        if 'element' not in atom:
            if 'symbol' in atom and 'mass' in atom:
                atom['element'] = atom['symbol']
            else:
                raise KeyError("Error reading Potential's atomic info: mass and symbol are needed if element is not given!")

        # Default symbol to element when not listed
        if 'symbol' not in atom:
            atom['symbol'] = atom['element']

        # Default mass to the element's standard value when not listed
        if 'mass' not in atom:
            atom['mass'] = atomic_mass(atom['element'])

        assert isinstance(atom['mass'], float), 'Mass needs to be a number!'

    # pot_dir defaults to the current directory
    self.pot_dir = pot_dir if pot_dir is not None else ''
def main(args):
    """Main function for running calc_struct_static.py"""
    # Parse command-line terms: the input file is required, UUID optional
    try:
        infile = args[0]
    except IndexError:
        raise ValueError('Input file not given')
    try:
        UUID = args[1]
    except IndexError:
        UUID = str(uuid.uuid4())

    # Read in parameters from input file
    input_dict = read_input(infile)

    # Initial parameter setup
    lammps_exe = input_dict.get('lammps_exe')
    pot_dir = input_dict.get('potential_dir', '')
    symbols = input_dict.get('symbols')
    u_length = input_dict.get('length_display_units', 'angstrom')
    u_press = input_dict.get('pressure_display_units', 'GPa')
    u_energy = input_dict.get('energy_display_units', 'eV')

    # r limits default to 2-5 Angstroms; user values use display units
    r_min = input_dict.get('r_min', None)
    if r_min is None:
        r_min = uc.get_in_units(2.0, 'angstrom')
    else:
        r_min = uc.get_in_units(float(r_min), u_length)

    r_max = input_dict.get('r_max', None)
    if r_max is None:
        r_max = uc.get_in_units(5.0, 'angstrom')
    else:
        r_max = uc.get_in_units(float(r_max), u_length)

    steps = int(input_dict.get('steps', 200))

    # Read in potential_file
    with open(input_dict['potential_file']) as f:
        potential = lmp.Potential(f, pot_dir)

    # Read in prototype_file (crystal model first, cif as fallback)
    with open(input_dict['crystal_file']) as f:
        try:
            ucell = am.models.crystal(f)[0]
        except Exception:
            f.seek(0)
            ucell = am.models.cif_cell(f)[0]

    # Run ecoh_vs_r
    rvals, avals, evals = ecoh_vs_r(lammps_exe, deepcopy(ucell), potential,
                                    symbols, rmin=r_min, rmax=r_max,
                                    rsteps=steps)

    # Use plot to get rough lattice parameter guess, a0, and build ucell
    a0 = avals[np.argmin(evals)]
    cell_0 = ucell.model(symbols=symbols, box_unit='scaled')
    ucell.box_set(a=a0,
                  b=a0 * ucell.box.b / ucell.box.a,
                  c=a0 * ucell.box.c / ucell.box.a,
                  scale=True)

    # Run quick_a_Cij to refine values
    results = quick_a_Cij(lammps_exe, ucell, potential, symbols)

    # Plot Ecoh vs. r
    plt.title('Cohesive Energy vs. \nInteratomic Spacing')
    plt.xlabel('r (' + u_length + ')')
    plt.ylabel('Cohesive Energy (' + u_energy + '/atom)')
    plt.plot(uc.get_in_units(rvals, u_length),
             uc.get_in_units(evals, u_energy))
    plt.savefig('Ecoh_vs_r.png')
    plt.close()

    ucell_new = results['ucell_new']
    cell_1 = ucell_new.model(symbols=symbols, box_unit=u_length)
    ecoh = uc.get_in_units(results['ecoh'], u_energy)
    C = results['C']

    # Build the results data model
    output = DataModelDict()
    output['calculation-crystal-phase'] = calc = DataModelDict()
    calc['calculation-id'] = UUID

    with open(input_dict['potential_file']) as f:
        potdict = DataModelDict(f)
    calc['potential'] = potdict['LAMMPS-potential']['potential']

    calc['crystal-info'] = DataModelDict()
    calc['crystal-info']['artifact'] = input_dict['crystal_file']
    calc['crystal-info']['symbols'] = symbols

    calc['phase-state'] = DataModelDict()
    calc['phase-state']['temperature'] = DataModelDict([('value', 0.0),
                                                        ('unit', 'K')])
    calc['phase-state']['pressure'] = DataModelDict([('value', 0.0),
                                                     ('unit', u_press)])

    calc['as-constructed-atomic-system'] = cell_0['atomic-system']
    calc['relaxed-atomic-system'] = cell_1['atomic-system']

    # BUGFIX: dict views are not indexable in Python 3
    c_family = list(cell_1['atomic-system']['cell'].keys())[0]

    calc['cohesive-energy'] = DataModelDict([('value', ecoh),
                                             ('unit', u_energy)])
    calc['elastic-constants'] = C.model(unit=u_press,
                                        crystal_system=c_family)['elastic-constants']

    calc['cohesive-energy-relation'] = DataModelDict()
    calc['cohesive-energy-relation']['r'] = DataModelDict(
        [('value', list(uc.get_in_units(rvals, u_length))), ('unit', u_length)])
    calc['cohesive-energy-relation']['a'] = DataModelDict(
        [('value', list(uc.get_in_units(avals, u_length))), ('unit', u_length)])
    calc['cohesive-energy-relation']['cohesive-energy'] = DataModelDict(
        [('value', list(uc.get_in_units(evals, u_length))), ('unit', u_energy)])

    with open('results.json', 'w') as f:
        output.json(fp=f, indent=4)
def load(model, symbols=None, key='atomic-system', index=0): """ Read in a data model containing a crystal structure. Parameters ---------- model : str, file-like object or DataModelDict The data model to read. symbols : tuple, optional Allows the list of element symbols to be assigned during loading. key : str, optional The key identifying the root element for the system definition. Default value is 'atomic-system'. index : int, optional. If the full model has multiple key entries, the index specifies which to access. Default value is 0 (first, or only entry). Returns ------- system : atomman.System The system object associated with the data model. """ # Pull system model out of data model using key and index a_sys = DM(model).finds(key) if len(a_sys) == 0: raise KeyError(key + ' not found in model') try: a_sys = a_sys[index] except: raise IndexError('Invalid index ' + str(index) + ' for model key ' + key) # Extract crystal system and box values c_system = list(a_sys['cell'].keys())[0] cell = a_sys['cell'][c_system] if c_system == 'cubic': a = b = c = uc.value_unit(cell['a']) alpha = beta = gamma = 90.0 elif c_system == 'hexagonal': a = b = uc.value_unit(cell['a']) c = uc.value_unit(cell['c']) alpha = beta = 90.0 gamma = 120.0 elif c_system == 'tetragonal': a = b = uc.value_unit(cell['a']) c = uc.value_unit(cell['c']) alpha = beta = gamma = 90.0 elif c_system == 'trigonal' or c_system == 'rhombohedral': a = b = c = uc.value_unit(cell['a']) alpha = beta = gamma = cell['alpha'] elif c_system == 'orthorhombic': a = uc.value_unit(cell['a']) b = uc.value_unit(cell['b']) c = uc.value_unit(cell['c']) alpha = beta = gamma = 90.0 elif c_system == 'monoclinic': a = uc.value_unit(cell['a']) b = uc.value_unit(cell['b']) c = uc.value_unit(cell['c']) alpha = gamma = 90.0 beta = cell['beta'] elif c_system == 'triclinic': a = uc.value_unit(cell['a']) b = uc.value_unit(cell['b']) c = uc.value_unit(cell['c']) alpha = cell['alpha'] beta = cell['beta'] gamma = cell['gamma'] box = 
Box(a=a, b=b, c=c, alpha=alpha, beta=beta, gamma=gamma) # Count atypes and generate list of symbols if given atoms = [] scale = None all_atypes = np.array(a_sys.finds('component')) all_symbols = np.array(a_sys.finds('symbol')) all_elements = np.array(a_sys.finds('element')) if len(all_atypes) == 0: if len(all_symbols) != 0: lsymbols, atypes = np.unique(all_symbols, return_inverse=True) elif len(all_elements) != 0: lsymbols, atypes = np.unique(all_elements, return_inverse=True) else: raise ValueError('No atom components, symbols or elements listed') else: atypes = all_atypes lsymbols = [None for i in range(max(all_atypes))] if len(all_elements) != 0 and len(all_symbols) == 0: all_symbols = all_elements if len(all_symbols) != 0: assert len(all_symbols) == len(atypes) sym_dict = {} for atype, symbol in zip(atypes, all_symbols): if atype not in sym_dict: sym_dict[atype] = symbol else: assert sym_dict[atype] == symbol for atype, symbol in iteritems(sym_dict): lsymbols[atype-1] = symbol # Use lsymbols if symbols parameter is not given. if symbols is None: symbols = lsymbols # Read per-atom properties natoms = len(atypes) prop = OrderedDict() prop['atype'] = atypes prop['pos'] = np.zeros((natoms, 3), dtype='float64') count = 0 pos_units = [] for atom in a_sys.iteraslist('atom'): # Read in pos for atom and unit info prop['pos'][count] = uc.value_unit(atom['position']) pos_units.append(atom['position'].get('unit', None)) # Add other per-atom properties for property in atom.iteraslist('property'): if property['name'] not in prop: value = uc.value_unit(property) shape = (natoms, ) + value.shape prop[property['name']] = np.zeros(shape, dtype=value.dtype) prop[property['name']][count] = uc.value_unit(property) count += 1 pos_unit = np.unique(pos_units) assert len(pos_unit) == 1, 'Mixed units for positions' if pos_unit[0] == 'scaled': scale=True else: scale=False atoms = Atoms(**prop) system = System(box=box, atoms=atoms, scale=scale, symbols=symbols) return system
class Potential(object):
    """class for building LAMMPS input lines from a LAMMPS-potential data model."""
    
    def __init__(self, model, pot_dir=None):
        """
        initializes an instance associated with a LAMMPS-potential data model.
        
        Arguments:
        model -- a string or file-like obect of a json/xml data model
                 containing a LAMMPS-potential branch.
        pot_dir -- (optional) the directory location of any artifacts
                   associated with the potential.
        """
        self.load(model, pot_dir)
    
    def load(self, model, pot_dir=None):
        """
        loads LAMMPS-potential data model info.
        
        Arguments:
        model -- a string or file-like obect of a json/xml data model
                 containing a LAMMPS-potential branch.
        pot_dir -- (optional) the directory location of any artifacts
                   associated with the potential.
        
        Raises KeyError if an atom entry lacks both an element and the
        symbol+mass pair needed to stand in for it.
        """
        self.__dm = DataModelDict(model).find('LAMMPS-potential')
        
        # Normalize each atom entry so element, symbol and mass are all set
        for atom in self.__dm.iteraslist('atom'):
            #Check if element is listed
            try:
                test = atom['element']
            
            #If no element is listed, symbol and mass must be
            except:
                try:
                    test = atom['symbol']
                    test = atom['mass']
                    atom['element'] = atom['symbol']
                except:
                    raise KeyError("Error reading Potential's atomic info: mass and symbol are needed if element is not given!")
            
            #Check if symbol is listed.  If not, make symbol = element
            try:
                test = atom['symbol']
            except:
                atom['symbol'] = atom['element']
            
            #Check if mass is listed.  If not, set to standard value of element
            try:
                mass_check = atom['mass']
            except:
                atom['mass'] = atomic_mass(atom['element'])
            
            assert isinstance(atom['mass'], float), 'Mass needs to be a number!'
        
        # pot_dir defaults to the current working directory
        if pot_dir is not None:
            self.pot_dir = pot_dir
        else:
            self.pot_dir = ''
    
    def __str__(self):
        """The string of the Potential returns its human-readable id"""
        return self.id
    
    @property
    def pot_dir(self):
        """The directory containing files associated with a given potential."""
        return str(self.__pot_dir)
    
    @pot_dir.setter
    def pot_dir(self, value):
        self.__pot_dir = str(value)
    
    @property
    def id(self):
        """Human-readable identifier."""
        return self.__dm['potential']['id']
    
    @property
    def uuid(self):
        """uuid hash-key."""
        return self.__dm['potential']['key']
    
    @property
    def units(self):
        """LAMMPS units option."""
        return self.__dm['units']
    
    @property
    def atom_style(self):
        """LAMMPS atom_style option."""
        return self.__dm['atom_style']
    
    @property
    def symbols(self):
        """List of all atom-model symbols."""
        symbols = []
        for atom in self.__dm.iteraslist('atom'):
            symbols.append(str(atom['symbol']))
        return symbols
    
    def elements(self, symbols=None):
        """
        Return list of element names associated with a list of atom-model
        symbols.
        
        Arguments:
        symbols -- List of atom-model symbols.  If None (default), will use
                   all of the Potential's symbols, i.e. Potential.symbols.
        """
        if symbols is None:
            symbols = self.symbols
        if not isinstance(symbols, (list, tuple)):
            symbols = [symbols]
        
        # Linear search of the atom entries for each requested symbol
        elements = []
        for symbol in symbols:
            for atom in self.__dm.iteraslist('atom'):
                if symbol == atom['symbol']:
                    elements.append(str(atom['element']))
                    break
        assert len(symbols) == len(elements), 'Not all elements found!'
        
        return elements
    
    def masses(self, symbols=None):
        """
        Return list of element masses associated with a list of atom-model
        symbols.
        
        Arguments:
        symbols -- List of atom-model symbols.  If None (default), will use
                   all of the Potential's symbols, i.e. Potential.symbols.
        """
        if symbols is None:
            symbols = self.symbols
        if not isinstance(symbols, (list, tuple)):
            symbols = [symbols]
        
        # Linear search of the atom entries for each requested symbol
        masses = []
        for symbol in symbols:
            for atom in self.__dm.iteraslist('atom'):
                if symbol == atom['symbol']:
                    masses.append(atom['mass'])
                    break
        assert len(symbols) == len(masses), 'Not all masses found!'
        
        return masses
    
    def pair_info(self, symbols = None):
        """
        Return all LAMMPS input command lines associated with the Potential
        and a list of atom-model symbols.
        
        Arguments:
        symbols -- (optional) list of atom-model symbols being used.  If None
                   (default), will use all of the Potential's elements.
        """
        #if no symbols supplied use all for potential
        if symbols is None:
            symbols = self.symbols
        if not isinstance(symbols, (list, tuple)):
            symbols = [symbols]
        
        #Generate mass lines (one per atom type, 1-indexed)
        masses = self.masses(symbols)
        mass = ''
        for i in xrange(len(masses)):
            mass += 'mass %i %f' % ( i+1, masses[i] ) + '\n'
        mass +='\n'
        
        #Generate pair_style line
        style = 'pair_style ' + self.__dm['pair_style']['type']
        # NOTE(review): 'terms' is assigned but never used; iteraslist below
        # re-reads the same 'term' entries
        terms = self.__dm['pair_style'].get('term', None)
        style += self.__pair_terms(self.__dm['pair_style'].iteraslist('term')) + '\n'
        
        #Generate pair_coeff lines
        coeff = ''
        for coeff_line in self.__dm.iteraslist('pair_coeff'):
            if 'interaction' in coeff_line:
                interaction = coeff_line['interaction'].get('symbol', ['*', '*'])
            else:
                interaction = ['*', '*']
            
            #Always include coeff lines that act on all atoms in the system
            if interaction == ['*', '*']:
                coeff_symbols = self.symbols
                coeff += 'pair_coeff * *' + self.__pair_terms(coeff_line.iteraslist('term'), symbols, coeff_symbols) + '\n'
                continue
            
            #Many-body potentials will contain a symbols term
            if len(coeff_line.finds('symbols')) > 0:
                many = True
            else:
                many = False
            
            #Treat as a many-body potential
            if many:
                coeff_symbols = interaction
                coeff += 'pair_coeff * *' + self.__pair_terms(coeff_line.iteraslist('term'), symbols, coeff_symbols) + '\n'
            
            #Treat as pair potential
            else:
                coeff_symbols = interaction
                assert len(coeff_symbols) == 2, 'Pair potential interactions need two listed elements'
                
                #Classic eam style is a special case
                if self.__dm['pair_style']['type'] == 'eam':
                    assert coeff_symbols[0] == coeff_symbols[1], 'Only i==j interactions allowed for eam style'
                    for i in xrange( len(symbols) ):
                        if symbols[i] == coeff_symbols[0]:
                            coeff += 'pair_coeff %i %i' % (i + 1, i + 1) + self.__pair_terms(coeff_line.iteraslist('term'), symbols, coeff_symbols) + '\n'
                
                #All other pair potentials: emit one line per matching i<=j pair
                else:
                    for i in xrange( len(symbols) ):
                        for j in xrange( i, len(symbols) ):
                            if (symbols[i] == coeff_symbols[0] and symbols[j] == coeff_symbols[1]) or (symbols[i] == coeff_symbols[1] and symbols[j] == coeff_symbols[0]):
                                coeff += 'pair_coeff %i %i' % (i + 1, j + 1) + self.__pair_terms(coeff_line.iteraslist('term'), symbols, coeff_symbols) + '\n'
        
        #generate additional command lines
        command = ''
        for command_line in self.__dm.iteraslist('command'):
            command += self.__pair_terms(command_line.iteraslist('term'), symbols, self.symbols).strip() + '\n'
        
        return mass + style + coeff + command
    
    def __pair_terms(self, terms, system_symbols = [], coeff_symbols = []):
        """utility function used by self.pair_info() for composing lines from terms"""
        line = ''
        
        for term in terms:
            for ttype, tval in term.iteritems():
                #print options and parameters as strings
                if ttype == 'option' or ttype == 'parameter':
                    line += ' ' + str(tval)
                
                #print files with pot_dir prefixes
                elif ttype == 'file':
                    line += ' ' + str( os.path.join(self.pot_dir, tval) )
                
                #print all symbols being used for symbolsList
                elif ttype == 'symbolsList' and (tval is True or tval == 'True'):
                    for coeff_symbol in coeff_symbols:
                        if coeff_symbol in system_symbols:
                            line += ' ' + coeff_symbol
                
                #print symbols being used with model in appropriate order for symbols
                elif ttype == 'symbols' and (tval is True or tval == 'True'):
                    for system_symbol in system_symbols:
                        if system_symbol in coeff_symbols:
                            line += ' ' + system_symbol
                        else:
                            line += ' NULL'
        
        return line
def load(model, key='atomic-system', index=0):
    """
    Read in a data model containing a crystal-structure and return a System
    unit cell.

    Parameters
    ----------
    model : str, file-like object or DataModelDict
        The data model to read, or a path to a file containing it.
    key : str, optional
        The key identifying the root element for the system definition.
        Default value is 'atomic-system'.
    index : int, optional
        If the full model has multiple key entries, the index specifies
        which to access.  Default value is 0.

    Returns
    -------
    tuple of (atomman.System, list)
        The system unit cell and the list of symbols/elements per atom type.

    Raises
    ------
    KeyError
        If key is not found in the model.
    IndexError
        If index is invalid for the number of matching entries.
    ValueError
        If no atom components, symbols or elements are listed.
    """
    # Accept a file path in addition to raw model content
    if isinstance(model, (str, unicode)) and os.path.isfile(model):
        with open(model) as f:
            model = f.read()

    #Pull system model out of data model using key and index
    a_sys = DataModelDict(model).finds(key)
    if len(a_sys) == 0:
        raise KeyError(key + ' not found in model')
    try:
        a_sys = a_sys[index]
    except:
        raise IndexError('Invalid index ' + str(index) + ' for model key ' + key)

    #identify the crystal system; it dictates which cell parameters appear
    c_system = a_sys['cell'].keys()[0]
    cell = a_sys['cell'][c_system]

    if c_system == 'cubic':
        a = b = c = uc.value_unit(cell['a'])
        alpha = beta = gamma = 90.0
    elif c_system == 'hexagonal':
        a = b = uc.value_unit(cell['a'])
        c = uc.value_unit(cell['c'])
        alpha = beta = 90.0
        gamma = 120.0
    elif c_system == 'tetragonal':
        a = b = uc.value_unit(cell['a'])
        c = uc.value_unit(cell['c'])
        alpha = beta = gamma = 90.0
    elif c_system == 'trigonal' or c_system == 'rhombohedral':
        a = b = c = uc.value_unit(cell['a'])
        alpha = beta = gamma = cell['alpha']
    elif c_system == 'orthorhombic':
        a = uc.value_unit(cell['a'])
        b = uc.value_unit(cell['b'])
        c = uc.value_unit(cell['c'])
        alpha = beta = gamma = 90.0
    elif c_system == 'monoclinic':
        a = uc.value_unit(cell['a'])
        b = uc.value_unit(cell['b'])
        c = uc.value_unit(cell['c'])
        alpha = gamma = 90.0
        beta = cell['beta']
    elif c_system == 'triclinic':
        a = uc.value_unit(cell['a'])
        b = uc.value_unit(cell['b'])
        c = uc.value_unit(cell['c'])
        alpha = cell['alpha']
        beta = cell['beta']
        gamma = cell['gamma']

    box = am.Box(a=a, b=b, c=c, alpha=alpha, beta=beta, gamma=gamma)

    #create list of atoms and list of elements
    atoms = []
    scale = None

    prop = DataModelDict()

    all_atypes = np.array(a_sys.finds('component'))
    all_symbols = np.array(a_sys.finds('symbol'))
    all_elements = np.array(a_sys.finds('element'))

    # If no explicit atom types, derive them from unique symbols/elements.
    # BUG FIX: return_inverse was previously passed as a bare (undefined)
    # positional name instead of the keyword argument return_inverse=True.
    if len(all_atypes) == 0:
        if len(all_symbols) != 0:
            symbols, atypes = np.unique(all_symbols, return_inverse=True)
        elif len(all_elements) != 0:
            symbols, atypes = np.unique(all_elements, return_inverse=True)
        else:
            raise ValueError('No atom components, symbols or elements listed')

    else:
        atypes = all_atypes
        symbols = [None for i in xrange(max(all_atypes))]

        # Fall back to element tags when symbols are absent
        if len(all_elements) != 0 and len(all_symbols) == 0:
            all_symbols = all_elements

        if len(all_symbols) != 0:
            assert len(all_symbols) == len(atypes)
            # Check that each atype is consistently paired with one symbol
            sym_dict = {}
            for atype, symbol in zip(atypes, all_symbols):
                if atype not in sym_dict:
                    sym_dict[atype] = symbol
                else:
                    assert sym_dict[atype] == symbol

            # atype values here are 1-based, hence the -1 offset
            for atype, symbol in sym_dict.iteritems():
                symbols[atype-1] = symbol

    prop['atype'] = atypes
    prop['pos'] = np.zeros((len(prop['atype']), 3), dtype='float64')
    count = 0

    pos_units = []
    for atom in a_sys.iteraslist('atom'):

        #read in pos for atom and unit info
        prop['pos'][count] = uc.value_unit(atom['position'])
        pos_units.append(atom['position'].get('unit', None))

        #Add per-atom properties, allocating each array on first occurrence
        for property in atom.iteraslist('property'):
            if property['name'] not in prop:
                value = uc.value_unit(property)
                prop[property['name']] = np.zeros((len(prop['atype']), len(value)), dtype=value.dtype)
            prop[property['name']][count] = uc.value_unit(property)
        count += 1

    # All positions must share one unit; 'scaled' means box-relative coords
    pos_unit = np.unique(pos_units)
    assert len(pos_unit) == 1, 'Mixed units for positions'
    if pos_unit[0] == 'scaled':
        scale=True
    else:
        scale=False

    atoms = am.Atoms(natoms=len(prop['atype']), prop=prop)
    system = am.System(box=box, atoms=atoms, scale=scale)

    return system, symbols
class Potential(object):
    """
    Class for building LAMMPS input lines from a potential-LAMMPS data model.
    """
    
    def __init__(self, model, pot_dir=None):
        """
        Initializes an instance associated with a potential-LAMMPS data model.
        
        Parameters
        ----------
        model : str or file-like object
            A JSON/XML data model containing a potential-LAMMPS branch.
        pot_dir : str, optional
            The path to a directory containing any artifacts associated with
            the potential.  Default value is None, which assumes any required
            files will be in the working directory when LAMMPS is executed.
        """
        self.load(model, pot_dir)
    
    def load(self, model, pot_dir=None):
        """
        Loads potential-LAMMPS data model info.
        
        Parameters
        ----------
        model : str or file-like object
            A JSON/XML data model containing a potential-LAMMPS branch.
        pot_dir : str, optional
            The path to a directory containing any artifacts associated with
            the potential.  Default value is None, which assumes any required
            files will be in the working directory when LAMMPS is executed.
        
        Raises
        ------
        KeyError
            If an atom entry lacks element and also lacks the mass or symbol
            needed to stand in for it.
        """
        # Load model and find potential-LAMMPS
        self.__dm = DM(model).find('potential-LAMMPS')
        
        # Extract properties
        self.__id = self.__dm['id']
        self.__key = self.__dm['key']
        self.__potid = self.__dm['potential']['id']
        self.__potkey = self.__dm['potential']['key']
        self.__units = self.__dm['units']
        self.__atom_style = self.__dm['atom_style']
        self.__pair_style = self.__dm['pair_style']['type']
        
        # pot_dir defaults to the current working directory
        if pot_dir is not None:
            self.pot_dir = pot_dir
        else:
            self.pot_dir = ''
        
        # Build lists of symbols, elements and masses
        self.__elements = []
        self.__symbols = []
        self.__masses = []
        self.__charges = []
        for atom in self.__dm.iteraslist('atom'):
            element = atom.get('element', None)
            symbol = atom.get('symbol', None)
            mass = atom.get('mass', None)
            # Missing charge defaults to 0.0 (not None)
            charge = float(atom.get('charge', 0.0))
            
            # Check if element is listed
            if element is None:
                if mass is None:
                    raise KeyError('mass is required for each atom if element is not listed')
                if symbol is None:
                    raise KeyError('symbol is required for each atom if element is not listed')
                else:
                    element = symbol
            
            # Check if symbol is listed.
            if symbol is None:
                symbol = element
            
            # Check if mass is listed.
            if mass is None:
                mass = atomic_mass(element)
            else:
                mass = float(mass)
            
            # Add values to the lists
            self.__elements.append(element)
            self.__symbols.append(symbol)
            self.__masses.append(mass)
            self.__charges.append(charge)
    
    def __str__(self):
        """str: The string of the Potential returns its human-readable id"""
        return self.id
    
    @property
    def pot_dir(self):
        """str : The directory containing files associated with a given potential."""
        return self.__pot_dir
    
    @pot_dir.setter
    def pot_dir(self, value):
        self.__pot_dir = str(value)
    
    @property
    def id(self):
        """str : Human-readable identifier for the LAMMPS implementation."""
        return self.__id
    
    @property
    def key(self):
        """str : uuid hash-key for the LAMMPS implementation."""
        return self.__key
    
    @property
    def potid(self):
        """str : Human-readable identifier for the potential model."""
        return self.__potid
    
    @property
    def potkey(self):
        """str : uuid hash-key for the potential model."""
        return self.__potkey
    
    @property
    def units(self):
        """str : LAMMPS units option."""
        return self.__units
    
    @property
    def atom_style(self):
        """str : LAMMPS atom_style option."""
        return self.__atom_style
    
    @property
    def symbols(self):
        """list of str : All atom-model symbols."""
        return self.__symbols
    
    @property
    def pair_style(self):
        """str : LAMMPS pair_style type."""
        return self.__pair_style
    
    def elements(self, symbols=None):
        """
        Returns a list of element names associated with atom-model symbols.
        
        Parameters
        ----------
        symbols : list of str, optional
            A list of atom-model symbols.  If None (default), will use all of
            the potential's symbols.
        
        Returns
        -------
        list of str
            The str element symbols corresponding to the atom-model symbols.
        """
        # Return all elements if symbols is None
        if symbols is None:
            return self.__elements
        
        # Convert symbols to a list if needed
        if not isinstance(symbols, (list, tuple)):
            symbols = [symbols]
        
        # Get all matching elements (raises ValueError on unknown symbols)
        elements = []
        for symbol in symbols:
            i = self.symbols.index(symbol)
            elements.append(self.__elements[i])
        
        return elements
    
    def masses(self, symbols=None):
        """
        Returns a list of atomic/ionic masses associated with atom-model
        symbols.
        
        Parameters
        ----------
        symbols : list of str, optional
            A list of atom-model symbols.  If None (default), will use all of
            the potential's symbols.
        
        Returns
        -------
        list of float
            The atomic/ionic masses corresponding to the atom-model symbols.
        """
        # Return all masses if symbols is None
        if symbols is None:
            return self.__masses
        
        # Convert symbols to a list if needed
        if not isinstance(symbols, (list, tuple)):
            symbols = [symbols]
        
        # Get all matching masses (raises ValueError on unknown symbols)
        masses = []
        for symbol in symbols:
            i = self.symbols.index(symbol)
            masses.append(self.__masses[i])
        
        return masses
    
    def charges(self, symbols=None):
        """
        Returns a list of atomic charges associated with atom-model symbols.
        A charge of 0.0 is used for any atom entry without an assigned charge.
        
        Parameters
        ----------
        symbols : list of str, optional
            A list of atom-model symbols.  If None (default), will use all of
            the potential's symbols.
        
        Returns
        -------
        list of float
            The atomic charges corresponding to the atom-model symbols.
        """
        # Return all charges if symbols is None
        if symbols is None:
            return self.__charges
        
        # Convert symbols to a list if needed
        if not isinstance(symbols, (list, tuple)):
            symbols = [symbols]
        
        # Get all matching charges (raises ValueError on unknown symbols)
        charges = []
        for symbol in symbols:
            i = self.symbols.index(symbol)
            charges.append(self.__charges[i])
        
        return charges
    
    def pair_info(self, symbols=None):
        """
        Generates the LAMMPS input command lines associated with the
        Potential and a list of atom-model symbols.
        
        Parameters
        ----------
        symbols : list of str, optional
            List of atom-model symbols corresponding to the unique atom
            types in a system.  If None (default), then all atom-model
            symbols will be included in the order that they are listed in
            the data model.
        
        Returns
        -------
        str
            The LAMMPS input command lines that specifies the potential.
        
        Raises
        ------
        ValueError
            If a pair interaction doesn't list exactly two symbols, or a
            classic eam interaction lists i != j symbols.
        """
        # If no symbols supplied use all for potential
        if symbols is None:
            symbols = self.symbols
            masses = self.__masses
        else:
            # Convert symbols to a list if needed
            if not isinstance(symbols, (list, tuple)):
                symbols = [symbols]
            masses = self.masses(symbols)
        
        # Generate mass lines (one per atom type, 1-indexed)
        mass = ''
        for i in range(len(masses)):
            mass += 'mass %i %f' % ( i+1, masses[i] ) + '\n'
        mass +='\n'
        
        # Generate pair_style line
        style = 'pair_style ' + self.pair_style
        terms = self.__dm['pair_style'].aslist('term')
        style += self.__pair_terms(terms) + '\n'
        
        # Generate pair_coeff lines
        coeff = ''
        for coeff_line in self.__dm.iteraslist('pair_coeff'):
            if 'interaction' in coeff_line:
                interaction = coeff_line['interaction'].get('symbol', ['*', '*'])
            else:
                interaction = ['*', '*']
            
            # Always include coeff lines that act on all atoms in the system
            if interaction == ['*', '*']:
                coeff_symbols = self.symbols
                coeff += 'pair_coeff * *'
                coeff += self.__pair_terms(coeff_line.iteraslist('term'),
                                           symbols, coeff_symbols) + '\n'
                continue
            
            # Many-body potentials will contain a symbols term
            if len(coeff_line.finds('symbols')) > 0:
                many = True
            else:
                many = False
            
            # Treat as a many-body potential
            if many:
                coeff_symbols = interaction
                coeff += 'pair_coeff * *'
                coeff += self.__pair_terms(coeff_line.iteraslist('term'),
                                           symbols, coeff_symbols) + '\n'
            
            # Treat as pair potential
            else:
                coeff_symbols = interaction
                if len(coeff_symbols) != 2:
                    raise ValueError('Pair potential interactions need two listed elements')
                
                # Classic eam style is a special case
                if self.pair_style == 'eam':
                    if coeff_symbols[0] != coeff_symbols[1]:
                        raise ValueError('Only i==j interactions allowed for eam style')
                    for i in range(len(symbols)):
                        if symbols[i] == coeff_symbols[0]:
                            coeff += 'pair_coeff %i %i' % (i + 1, i + 1)
                            coeff += self.__pair_terms(coeff_line.iteraslist('term'),
                                                       symbols, coeff_symbols) + '\n'
                
                # All other pair potentials: one line per matching i<=j pair
                else:
                    for i in range(len(symbols)):
                        for j in range(i, len(symbols)):
                            if ((symbols[i] == coeff_symbols[0]
                                 and symbols[j] == coeff_symbols[1])
                                or (symbols[i] == coeff_symbols[1]
                                    and symbols[j] == coeff_symbols[0])):
                                coeff += 'pair_coeff %i %i' % (i + 1, j + 1)
                                coeff += self.__pair_terms(coeff_line.iteraslist('term'),
                                                           symbols, coeff_symbols) + '\n'
        
        # Generate additional command lines
        command = ''
        for command_line in self.__dm.iteraslist('command'):
            command += self.__pair_terms(command_line.iteraslist('term'),
                                         symbols, self.symbols).strip() + '\n'
        
        return mass + style + coeff + command
    
    def __pair_terms(self, terms, system_symbols = [], coeff_symbols = []):
        """utility function used by self.pair_info() for composing lines from terms"""
        line = ''
        
        for term in terms:
            for ttype, tval in iteritems(term):
                
                # Print options and parameters as strings
                if ttype == 'option' or ttype == 'parameter':
                    line += ' ' + str(tval)
                
                # Print files with pot_dir prefixes
                elif ttype == 'file':
                    line += ' ' + str(os.path.join(self.pot_dir, tval))
                
                # Print all symbols being used for symbolsList
                elif ttype == 'symbolsList' and (tval is True or tval == 'True'):
                    for coeff_symbol in coeff_symbols:
                        if coeff_symbol in system_symbols:
                            line += ' ' + coeff_symbol
                
                # Print symbols being used with model in appropriate order for symbols
                elif ttype == 'symbols' and (tval is True or tval == 'True'):
                    for system_symbol in system_symbols:
                        if system_symbol in coeff_symbols:
                            line += ' ' + system_symbol
                        else:
                            line += ' NULL'
        
        return line
def load(self, model, pot_dir=None): """ Loads potential-LAMMPS data model info. Parameters ---------- model : str or file-like object A JSON/XML data model containing a potential-LAMMPS branch. pot_dir : str, optional The path to a directory containing any artifacts associated with the potential. Default value is None, which assumes any required files will be in the working directory when LAMMPS is executed. """ # Load model and find potential-LAMMPS self.__dm = DM(model).find('potential-LAMMPS') # Extract properties self.__id = self.__dm['id'] self.__key = self.__dm['key'] self.__potid = self.__dm['potential']['id'] self.__potkey = self.__dm['potential']['key'] self.__units = self.__dm['units'] self.__atom_style = self.__dm['atom_style'] self.__pair_style = self.__dm['pair_style']['type'] if pot_dir is not None: self.pot_dir = pot_dir else: self.pot_dir = '' # Build lists of symbols, elements and masses self.__elements = [] self.__symbols = [] self.__masses = [] self.__charges = [] for atom in self.__dm.iteraslist('atom'): element = atom.get('element', None) symbol = atom.get('symbol', None) mass = atom.get('mass', None) charge = float(atom.get('charge', 0.0)) # Check if element is listed if element is None: if mass is None: raise KeyError('mass is required for each atom if element is not listed') if symbol is None: raise KeyError('symbol is required for each atom if element is not listed') else: element = symbol # Check if symbol is listed. if symbol is None: symbol = element # Check if mass is listed. if mass is None: mass = atomic_mass(element) else: mass = float(mass) # Add values to the lists self.__elements.append(element) self.__symbols.append(symbol) self.__masses.append(mass) self.__charges.append(charge)
def structure_static(xml_lib_dir):
    """
    Compiles structure_static calculation records into per-group csv summary
    files and maintains per-potential badlist.txt files of failed runs.

    Parameters
    ----------
    xml_lib_dir : str
        Root directory of the xml record library.  Records are expected at
        xml_lib_dir/<potential>/structure_static/<group>/<sim>.xml.
    """
    calc_name = 'structure_static'
    groups = os.path.join(xml_lib_dir, '*', calc_name, '*')
    error_dict = DataModelDict()

    for group_dir in glob.iglob(groups):
        if os.path.isdir(group_dir):
            calc_dir, group_name = os.path.split(group_dir)
            pot_name = os.path.basename(os.path.dirname(calc_dir))
            print(pot_name)

            # Read previously identified bad simulations (best-effort)
            try:
                with open(os.path.join(calc_dir, 'badlist.txt'), 'r') as f:
                    badlist = f.read().split()
            except:
                badlist = []

            data = DataModelDict()
            for sim_path in glob.iglob(os.path.join(group_dir, '*.xml')):
                sim_file = os.path.basename(sim_path)
                sim_name = sim_file[:-4]
                if sim_name in badlist:
                    continue
                with open(sim_path) as f:
                    sim = DataModelDict(f)['calculation-crystal-phase']

                # Record errors reported by the calculation itself, keeping
                # the first line mentioning 'Error' as the summary
                if 'error' in sim:
                    badlist.append(sim_name)
                    error_message = sim['error']
                    error = 'Unknown error'
                    for line in error_message.split('\n'):
                        if 'Error' in line:
                            error = line
                    error_dict.append(error, sim_name)
                    continue

                # Skip records without a relaxed system
                try:
                    cell = sim['relaxed-atomic-system']['cell']
                except:
                    tar_gz_path = sim_path[:-4] + '.tar.gz'
                    # BUG FIX: was os.isfile (AttributeError); also always
                    # continue here since cell is unavailable either way --
                    # previously a missing archive fell through and reused a
                    # stale/undefined cell.
                    if os.path.isfile(tar_gz_path):
                        error_dict.append('Unknown error', sim_name)
                    continue

                data.append('key', sim.get('calculation-id', ''))
                data.append('file', sim['crystal-info'].get('artifact', ''))
                data.append('symbols',
                            '_'.join(sim['crystal-info'].aslist('symbols')))
                data.append('Temperature (K)',
                            sim['phase-state']['temperature']['value'])
                data.append('Pressure (GPa)',
                            sim['phase-state']['pressure']['value'])

                # list() wrapper keeps keys()[0]-style access working on py3
                cell = cell[list(cell.keys())[0]]
                data.append('Ecoh (eV)', sim['cohesive-energy']['value'])

                # Lattice parameters: blank when the crystal system omits them
                for dim in ('a', 'b', 'c'):
                    if dim in cell:
                        data.append(dim + ' (A)', cell[dim]['value'])
                    else:
                        data.append(dim + ' (A)', '')

                # Elastic constants: blank for components not in the record
                C_dict = {}
                for C in sim['elastic-constants'].iteraslist('C'):
                    C_dict[C['ij']] = C['stiffness']['value']
                for ij in ('1 1', '2 2', '3 3', '1 2', '1 3',
                           '2 3', '4 4', '5 5', '6 6'):
                    col = 'C' + ij.replace(' ', '') + ' (GPa)'
                    data.append(col, C_dict.get(ij, ''))

            # Write the csv summary for this group, one row per record
            if len(data.keys()) > 0:
                csv_path = os.path.join(calc_dir,
                                        'structure_static_' + group_name + '.csv')
                with open(csv_path, 'w') as f:
                    f.write(','.join(data.keys()) + '\n')
                    for i in range(len(data.aslist('key'))):
                        f.write(','.join([str(data.aslist(k)[i])
                                          for k in data.keys()]) + '\n')

            # Persist the updated badlist for this potential
            with open(os.path.join(calc_dir, 'badlist.txt'), 'w') as f:
                for bad in badlist:
                    f.write(bad + '\n')
def model(self, **kwargs): """ Return a DataModelDict 'cell' representation of the system Keyword Arguments: box_unit -- length unit to use for the box. Default is angstrom. symbols -- list of atom-model symbols corresponding to the atom types. elements -- list of element tags corresponding to the atom types. prop_units -- dictionary where the keys are the property keys to include, and the values are units to use. If not given, only the positions in scaled units are included. """ box_unit = kwargs.get('box_unit', 'angstrom') symbols = kwargs.get('symbols', [None for i in xrange(self.natypes)]) if not isinstance(symbols, list): symbols = [symbols] assert len(symbols) == self.natypes, 'Number of symbols does not match number of atom types' elements = kwargs.get('elements', [None for i in xrange(self.natypes)]) if not isinstance(elements, list): elements = [elements] assert len(elements) == self.natypes, 'Number of elements does not match number of atom types' prop_units = kwargs.get('prop_units', {}) if 'pos' not in prop_units: prop_units['pos'] = 'scaled' a = uc.get_in_units(self.box.a, box_unit) b = uc.get_in_units(self.box.b, box_unit) c = uc.get_in_units(self.box.c, box_unit) alpha = self.box.alpha beta = self.box.beta gamma = self.box.gamma model = DM() model['cell'] = cell = DM() if np.allclose([alpha, beta, gamma], [90.0, 90.0, 90.0]): if np.isclose(b/a, 1.): if np.isclose(c/a, 1.): c_family = 'cubic' cell[c_family] = DM() cell[c_family]['a'] = DM([('value', (a+b+c)/3), ('unit', box_unit)]) else: c_family = 'tetragonal' cell[c_family] = DM() cell[c_family]['a'] = DM([('value', (a+b)/2), ('unit', box_unit)]) cell[c_family]['c'] = DM([('value', c), ('unit', box_unit)]) else: #if np.isclose(b/a, 3.0**0.5): # c_family = 'hexagonal' # cell[c_family] = DM() # a_av = (a + b/(3.0**0.5))/2. 
# cell[c_family]['a'] = DM([('value', a_av), ('unit', box_unit)]) # cell[c_family]['c'] = DM([('value', c), ('unit', box_unit)]) #else: c_family = 'orthorhombic' cell[c_family] = DM() cell[c_family]['a'] = DM([('value', a), ('unit', box_unit)]) cell[c_family]['b'] = DM([('value', b), ('unit', box_unit)]) cell[c_family]['c'] = DM([('value', c), ('unit', box_unit)]) else: raise ValueError('Non-orthogonal boxes comming') for i in xrange(self.natoms): atom = DM() atom['component'] = int(self.atoms_prop(a_id=i, key='atype')) symbol = symbols[self.atoms_prop(a_id=i, key='atype')-1] if symbol is not None: atom['symbol'] = symbol element = elements[self.atoms_prop(a_id=i, key='atype')-1] if element is not None: atom['element'] = element atom['position'] = DM() if prop_units['pos'] == 'scaled': atom['position']['value'] = list(self.atoms_prop(a_id=i, key='pos', scale=True)) else: atom['position']['value'] = list(uc.get_in_units(self.atoms_prop(a_id=i, key='pos'), prop_units['pos'])) atom['position']['unit'] = prop_units['pos'] for key, unit in prop_units.iteritems(): if key != 'pos' and key != 'atype': value = uc.get_in_units(self.atoms_prop(a_id=i, key=key), unit) try: value = list(value) except: pass prop = DM([('name', key), ('value', value), ('unit', unit)]) atom.append('property', prop) model.append('atom', atom) return DM([('atomic-system', model)])
def set_database(name=None, style=None, host=None):
    """
    Allows for database information to be defined in the settings file.
    Screen prompts will be given to allow any necessary database parameters
    to be entered.
    
    Parameters
    ----------
    name : str, optional
        The name to assign to the database. If not given, the user will be
        prompted to enter one.
    style : str, optional
        The database style associated with the database. If not given, the
        user will be prompted to enter one.
    host : str, optional
        The database host (directory path or url) where the database is
        located. If not given, the user will be prompted to enter one.
    
    Returns
    -------
    None
        Returns early (without saving) if the user declines to overwrite an
        existing definition.
    
    Raises
    ------
    ValueError
        If the overwrite prompt is answered with anything other than
        yes/y/no/n.
    """
    # Get information from the settings file
    settings = load_settings()
    
    # Access existing database definitions (also validates that the
    # 'iprPy-defined-parameters' branch exists in the settings)
    databases = settings['iprPy-defined-parameters'].aslist('database')
    
    # Ask for name if not given
    if name is None:
        name = screen_input('Enter a name for the database:')
    
    # Load database if it exists
    try:
        database_settings = settings.find('database', yes={'name':name})
    
    # Create new database entry if it doesn't exist
    # BUG FIX: was a bare except, which also swallowed KeyboardInterrupt
    # and SystemExit raised during the lookup
    except Exception:
        database_settings = DM()
        settings['iprPy-defined-parameters'].append('database',
                                                    database_settings)
        database_settings['name'] = name
    
    # Ask if existing database should be overwritten
    else:
        print('Database', name, 'already defined.')
        option = screen_input('Overwrite? (yes or no):')
        if option in ['yes', 'y']:
            pass
        elif option in ['no', 'n']:
            return None
        else:
            raise ValueError('Invalid choice')
    
    # Ask for style if not given
    if style is None:
        style = screen_input("Enter the database's style:")
    database_settings['style'] = style
    
    # Ask for host if not given
    if host is None:
        host = screen_input("Enter the database's host:")
    database_settings['host'] = host
    
    # Collect arbitrary extra parameters until a blank key is entered
    print('Enter any other database parameters as key, value')
    print('Exit by leaving key blank')
    while True:
        key = screen_input('key:')
        if key == '':
            break
        value = screen_input('value:')
        database_settings.append('params', DM([(key, value)]))
    
    save_settings(settings)
def model(self, **kwargs):
    """
    Return or set DataModelDict representation of the elastic constants.

    Keyword Arguments:
    model -- string or file-like object of json/xml model or DataModelDict.
    unit -- units to give values in.  Default is None.
    crystal_system -- crystal system representation.  Default is triclinic.

    If model is given, then model is converted into a DataModelDict and the
    elastic constants are read in if the model contains exactly one
    'elastic-constants' branch.

    If model is not given, then a DataModelDict for the elastic constants
    is constructed.  The values included will depend on the crystal system,
    and will be converted to the specified units.
    """
    # Set values if model given
    if 'model' in kwargs:
        assert len(kwargs) == 1, 'no keyword arguments supported with model reading'
        model = DM(kwargs['model']).find('elastic-constants')
        c_dict = {}
        for C in model['C']:
            # 'ij' is of the form 'i j' -- take the two digit characters to
            # build keys like 'C11', 'C12', ...
            key = 'C' + C['ij'][0] + C['ij'][2]
            c_dict[key] = uc.value_unit(C['stiffness'])
        self.Cij = ElasticConstants(**c_dict).Cij

    # Return DataModelDict if model not given
    else:
        unit = kwargs.pop('unit', None)
        crystal_system = kwargs.pop('crystal_system', 'triclinic')
        assert len(kwargs) == 0, 'Invalid arguments'

        model = DM()
        model['elastic-constants'] = DM()
        model['elastic-constants']['C'] = C = []

        # Convert the full 6x6 Cij matrix into the requested units, then
        # collapse it to the independent constants of the crystal system by
        # averaging the symmetry-equivalent components.
        c = uc.get_in_units(self.Cij, unit)
        c_dict = DM()

        if crystal_system == 'cubic':
            c_dict['1 1'] = (c[0,0] + c[1,1] + c[2,2]) / 3
            c_dict['1 2'] = (c[0,1] + c[0,2] + c[1,2]) / 3
            c_dict['4 4'] = (c[3,3] + c[4,4] + c[5,5]) / 3

        elif crystal_system == 'hexagonal':
            c_dict['1 1'] = (c[0,0] + c[1,1]) / 2
            c_dict['3 3'] = c[2,2]
            # NOTE(review): averages C12 with C11 - 2*C66 -- presumably using
            # the hexagonal identity C66 = (C11 - C12)/2; confirm.
            c_dict['1 2'] = (c[0,1] + (c[0,0] - 2*c[5,5])) / 2
            c_dict['1 3'] = (c[0,2] + c[1,2]) / 2
            c_dict['4 4'] = (c[3,3] + c[4,4]) / 2

        elif crystal_system == 'tetragonal':
            c_dict['1 1'] = (c[0,0] + c[1,1]) / 2
            c_dict['3 3'] = c[2,2]
            c_dict['1 2'] = c[0,1]
            c_dict['1 3'] = (c[0,2] + c[1,2]) / 2
            c_dict['1 6'] = (c[0,5] - c[1,5]) / 2
            c_dict['4 4'] = (c[3,3] + c[4,4]) / 2
            c_dict['6 6'] = c[5,5]

        elif crystal_system == 'orthorhombic':
            c_dict['1 1'] = c[0,0]
            c_dict['2 2'] = c[1,1]
            c_dict['3 3'] = c[2,2]
            c_dict['1 2'] = c[0,1]
            c_dict['1 3'] = c[0,2]
            c_dict['2 3'] = c[1,2]
            c_dict['4 4'] = c[3,3]
            c_dict['5 5'] = c[4,4]
            c_dict['6 6'] = c[5,5]

        # triclinic (and any unrecognized system): all 21 upper-triangle terms
        else:
            c_dict['1 1'] = c[0,0]
            c_dict['1 2'] = c[0,1]
            c_dict['1 3'] = c[0,2]
            c_dict['1 4'] = c[0,3]
            c_dict['1 5'] = c[0,4]
            c_dict['1 6'] = c[0,5]
            c_dict['2 2'] = c[1,1]
            c_dict['2 3'] = c[1,2]
            c_dict['2 4'] = c[1,3]
            c_dict['2 5'] = c[1,4]
            c_dict['2 6'] = c[1,5]
            c_dict['3 3'] = c[2,2]
            c_dict['3 4'] = c[2,3]
            c_dict['3 5'] = c[2,4]
            c_dict['3 6'] = c[2,5]
            c_dict['4 4'] = c[3,3]
            c_dict['4 5'] = c[3,4]
            c_dict['4 6'] = c[3,5]
            c_dict['5 5'] = c[4,4]
            c_dict['5 6'] = c[4,5]
            c_dict['6 6'] = c[5,5]

        # Build one stiffness element per retained constant.
        # NOTE(review): .iteritems() is Python-2 only.
        for ij, value in c_dict.iteritems():
            C.append(DM([('stiffness', DM([ ('value', value), ('unit', unit) ])), ('ij', ij) ]) )

        return model
def runner(dbase, run_directory, orphan_directory=None, hold_directory=None):
    """
    High-throughput calculation runner.

    Parameters
    ----------
    dbase : iprPy.Database
        The database to interact with.
    run_directory : str
        The path to the directory where the calculation instances to run are
        located.
    orphan_directory : str, optional
        The path for the orphan directory where incomplete calculations are
        moved.  If None (default) then will use 'orphan' at the same level as
        the run_directory.
    hold_directory : str, optional
        The path for the hold directory where tar archives that failed to be
        uploaded are moved to.  If None (default) then will use 'hold' at the
        same level as the run_directory.
    """
    # Get path to Python executable running this script
    py_exe = sys.executable
    if py_exe is None:
        py_exe = 'python'

    # Get absolute path to run_directory
    run_directory = os.path.abspath(run_directory)

    # Get original working directory so it can be restored at the end
    original_dir = os.getcwd()

    # Define runner log file
    # NOTE(review): the format has slots year-month-day-minute-second-
    # microsecond-pid -- d.hour appears to be missing from the args; confirm
    # whether the intended name included the hour.
    d = datetime.datetime.now()
    pid = os.getpid()
    runner_log_dir = os.path.join(os.path.dirname(rootdir), 'runner-logs')
    if not os.path.isdir(runner_log_dir):
        os.makedirs(runner_log_dir)
    log_file = os.path.join(runner_log_dir, '%04i-%02i-%02i-%02i-%02i-%06i-%i.log' % (d.year, d.month, d.day, d.minute, d.second, d.microsecond, pid))

    # Set default orphan_directory
    if orphan_directory is None:
        orphan_directory = os.path.join(os.path.dirname(run_directory), 'orphan')

    # Set default hold_directory
    if hold_directory is None:
        hold_directory = os.path.join(os.path.dirname(run_directory), 'hold')

    # Start runner log file
    with open(log_file, 'a') as log:

        # Change to the run directory
        os.chdir(run_directory)

        # Initialize bidfailcount counter (consecutive failed bids)
        bidfailcount = 0

        # Announce the runner's pid
        print('Runner started with pid', pid)
        sys.stdout.flush()

        # flist is the running list of calculations
        flist = os.listdir(run_directory)
        while len(flist) > 0:

            # Pick a random calculation from the list (randomization lets
            # multiple runners share one run_directory)
            index = random.randint(0, len(flist)-1)
            sim = flist[index]

            # Submit a bid and check if it succeeded
            if bid(sim):

                # Reset bidfailcount
                bidfailcount = 0

                # Move to simulation directory
                os.chdir(sim)
                log.write('%s\n' % sim)

                # Check that the calculation has calc_*.py, calc_*.in and
                # record in the database
                try:
                    record = dbase.get_record(name=sim)
                    calc_py = get_file('calc_*.py')
                    calc_in = get_file('calc_*.in')

                # Pass ConnectionErrors forward killing runner
                except requests.ConnectionError as e:
                    raise requests.ConnectionError(e)

                # If not complete, zip and move to the orphan directory
                except:
                    log.write('Incomplete simulation: moved to orphan directory\n\n')
                    os.chdir(run_directory)
                    if not os.path.isdir(orphan_directory):
                        os.makedirs(orphan_directory)
                    shutil.make_archive(os.path.join(orphan_directory, sim), 'gztar', root_dir=run_directory, base_dir=sim)
                    removecalc(os.path.join(run_directory, sim))
                    flist = os.listdir(run_directory)
                    continue

                # Check if any files in the calculation folder are incomplete
                # records (i.e. parent calculations that have not finished)
                error_flag = False
                ready_flag = True

                for fname in glob.iglob('*'):
                    parent_sim, ext = os.path.splitext(os.path.basename(fname))
                    if ext in ('.json', '.xml'):
                        parent = DM(fname)
                        try:
                            status = parent.find('status')

                            # Check parent record in database to see if it
                            # has completed
                            if status == 'not calculated':
                                parent_record = dbase.get_record(name=parent_sim)
                                try:
                                    status = parent_record.content.find('status')

                                    # Mark flag if still incomplete
                                    if status == 'not calculated':
                                        ready_flag = False
                                        break

                                    # Skip if parent calculation failed
                                    elif status == 'error':
                                        with open(os.path.basename(fname), 'w') as f:
                                            parent_record.content.json(fp=f, indent=4)
                                        error_flag = True
                                        error_message = 'parent calculation issued an error'
                                        break

                                    # Ignore if unknown status
                                    else:
                                        raise ValueError('unknown status')

                                # Copy parent record to calculation folder if
                                # it is now complete (a finished record has no
                                # 'status' element, so find() raises here)
                                except:
                                    with open(os.path.basename(fname), 'w') as f:
                                        parent_record.content.json(fp=f, indent=4)
                                    log.write('parent %s copied to sim folder\n' % parent_sim)

                            # skip if parent calculation failed
                            elif status == 'error':
                                error_flag = True
                                error_message = 'parent calculation issued an error'
                                break
                        except:
                            # Not a parent record (no 'status') -- ignore
                            continue

                # Handle calculations that have unfinished parents: release
                # the bid and focus on the parent next
                if not ready_flag:
                    bid_files = glob.glob('*.bid')
                    os.chdir(run_directory)
                    for bid_file in bid_files:
                        os.remove(os.path.join(sim, bid_file))
                    flist = [parent_sim]
                    log.write('parent %s not ready\n\n' % parent_sim)
                    continue

                # Run the calculation
                try:
                    assert not error_flag, error_message
                    run = subprocess.Popen([py_exe, calc_py, calc_in, sim], stderr=subprocess.PIPE)
                    error_message = run.stderr.read()

                    # Load results.json
                    try:
                        model = DM('results.json')

                    # Throw errors if no results.json
                    except:
                        error_flag = True
                    assert not error_flag, error_message
                    log.write('sim calculated successfully\n')

                # Catch any errors and build results.json with error status
                except:
                    model = record.content
                    keys = list(model.keys())
                    record_type = keys[0]
                    model[record_type]['status'] = 'error'
                    model[record_type]['error'] = str(sys.exc_info()[1])
                    with open('results.json', 'w') as f:
                        model.json(fp=f, indent=4)
                    log.write('error: %s\n' % model[record_type]['error'])

                # Read in results.json
                #model = DM('results.json')

                # Update record, retrying up to 10 times
                tries = 0
                while tries < 10:
                    try:
                        dbase.update_record(content=model, name=sim)
                        break
                    except:
                        tries += 1
                if tries == 10:
                    os.chdir(run_directory)
                    log.write('failed to update record\n')
                else:
                    # Archive calculation and add to database or hold_directory
                    try:
                        dbase.add_tar(root_dir=run_directory, name=sim)
                    except:
                        log.write('failed to upload archive\n')
                        if not os.path.isdir(hold_directory):
                            os.makedirs(hold_directory)
                        shutil.move(sim+'.tar.gz', hold_directory)
                    os.chdir(run_directory)
                    removecalc(os.path.join(run_directory, sim))
                log.write('\n')

            # Else if bid(sim) failed
            else:
                bidfailcount += 1

                # Stop unproductive worker after 10 consecutive bid fails
                if bidfailcount > 10:
                    print("Didn't find an open simulation")
                    break

                # Pause for 10 seconds before trying again
                time.sleep(10)

            # Regenerate flist and flush log file
            flist = os.listdir(run_directory)
            log.flush()
            os.fsync(log.fileno())

    print('No simulations left to run')
    os.chdir(original_dir)
def set_crystal(terms, crystals, elements):
    """
    Interpret 'crystal' command arguments and update the running lists of
    crystal files and their per-atom-type element tags.

    Parameters
    ----------
    terms : list of str
        Command terms.  Supported forms:
        'add prototypes <dir> [only <names...>]',
        'add prototype <file> [elements <tags...>]',
        'add cifs <dir> [only <names...>]',
        'add cif <file> [elements <tags...>]',
        'clear'.
    crystals : list of str
        Running list of crystal file paths.  Updated in place and returned.
    elements : list of list of str
        Element tags for each crystal, parallel to crystals.  Updated in
        place and returned.

    Returns
    -------
    tuple of (list, list)
        The updated (crystals, elements) lists.

    Raises
    ------
    ValueError
        If the arguments are invalid, a path is not accessible, a file is not
        of the expected type, or the element count does not match the number
        of atom types.
    """
    def _store(crystal, element):
        # Replace the element tags if the crystal is already listed,
        # otherwise append a new entry to both parallel lists.
        if crystal in crystals:
            elements[crystals.index(crystal)] = element
        else:
            crystals.append(crystal)
            elements.append(element)

    if len(terms) >= 3 and terms[0] == 'add':

        # add prototypes <directory> [only <names...>]
        if terms[1] == 'prototypes':
            try:
                i = terms.index('only')
            except ValueError:
                i = len(terms)
            path = os.path.abspath(os.path.realpath(' '.join(terms[2:i])))
            if not os.path.isdir(path):
                raise ValueError(path + ' is not an accessible directory')
            names = terms[i+1:]

            for fname in os.listdir(path):
                # Skip any file that is not a crystal prototype data model
                try:
                    crystal = os.path.join(path, fname)
                    with open(crystal) as f:
                        model = DataModelDict(f)
                    natypes = atomman.models.crystal(model)[0].natypes
                    model.find('crystal-prototype')
                except:
                    continue

                # If 'only' names given, keep just matching prototypes.
                # (Fix: removed leftover debug print statements.)
                if len(names) > 0:
                    match = False
                    for name in names:
                        if name in model['atom-system']['identifier'].values():
                            match = True
                            break
                    if not match:
                        continue

                _store(crystal, ['*' for j in range(natypes)])

        # add prototype <file> [elements <tags...>]
        elif terms[1] == 'prototype':
            try:
                i = terms.index('elements')
            except ValueError:
                i = len(terms)
            path = os.path.abspath(os.path.realpath(' '.join(terms[2:i])))
            if not os.path.isfile(path):
                raise ValueError(path + ' is not an accessible file')
            element = terms[i+1:]

            try:
                crystal = path
                with open(crystal) as f:
                    model = DataModelDict(f)
                natypes = atomman.models.crystal(model)[0].natypes
                model.find('crystal-prototype')
            except:
                raise ValueError(path + ' is not a crystal prototype file')

            if len(element) == 0:
                element = ['*' for j in range(natypes)]
            elif len(element) != natypes:
                raise ValueError('number of elements (%i) and number of atom types (%i) do not match' % (len(element), natypes))

            _store(crystal, element)

        # add cifs <directory> [only <names...>]
        elif terms[1] == 'cifs':
            try:
                i = terms.index('only')
            except ValueError:
                i = len(terms)
            path = os.path.abspath(os.path.realpath(' '.join(terms[2:i])))
            if not os.path.isdir(path):
                raise ValueError(path + ' is not an accessible directory')
            names = terms[i+1:]

            for fname in os.listdir(path):
                # If 'only' names given, match against the filename stem
                if len(names) > 0:
                    if fname[:-4] not in names:
                        continue
                try:
                    crystal = os.path.join(path, fname)
                    with open(crystal) as f:
                        element = atomman.models.cif_cell(f)[1]
                    # Fix: original evaluated '[element]' without assigning,
                    # leaving a bare str unwrapped.
                    if not isinstance(element, list):
                        element = [element]
                except:
                    continue

                _store(crystal, element)

        # add cif <file> [elements <tags...>]
        elif terms[1] == 'cif':
            try:
                i = terms.index('elements')
            except ValueError:
                i = len(terms)
            path = os.path.abspath(os.path.realpath(' '.join(terms[2:i])))
            if not os.path.isfile(path):
                raise ValueError(path + ' is not an accessible file')

            # Fix: original had a dead 'if True:/else: raise' -- use
            # try/except like the sibling branches so bad files report the
            # intended error.
            try:
                crystal = path
                with open(crystal) as f:
                    element = atomman.models.cif_cell(f)[1]
                natypes = len(element)
            except:
                raise ValueError(path + ' is not a cif file')

            if len(terms[i+1:]) > 0:
                element = terms[i+1:]
                if len(element) != natypes:
                    raise ValueError('number of elements (%i) and number of atom types (%i) do not match' % (len(element), natypes))

            _store(crystal, element)

        else:
            raise ValueError('invalid crystal argument')

    elif len(terms) == 1 and terms[0] == 'clear':
        # Fix: original assigned an unused 'proto_list = []' here, so 'clear'
        # silently left both lists untouched.  Clear them in place.
        del crystals[:]
        del elements[:]

    else:
        raise ValueError('invalid crystal argument')

    return crystals, elements
def model(self, **kwargs):
    """
    Return a DataModelDict 'cell' representation of the system

    Keyword Arguments:
    box_unit -- length unit to use for the box.  Default is angstrom.
    symbols -- list of atom-model symbols corresponding to the atom types.
    elements -- list of element tags corresponding to the atom types.
    prop_units -- dictionary where the keys are the property keys to include,
                  and the values are units to use.  If not given, only the
                  positions in scaled units are included.
    a_std, b_std, c_std -- standard deviation of lattice constants values to
                           include as value errors.
    """
    # NOTE(review): this method uses Python-2 constructs (xrange, iteritems).
    box_unit = kwargs.get('box_unit', 'angstrom')

    # Normalize symbols to a list of length natypes
    symbols = kwargs.get('symbols', [None for i in xrange(self.natypes)])
    if not isinstance(symbols, list):
        symbols = [symbols]
    assert len(symbols) == self.natypes, 'Number of symbols does not match number of atom types'

    # Normalize elements to a list of length natypes
    elements = kwargs.get('elements', [None for i in xrange(self.natypes)])
    if not isinstance(elements, list):
        elements = [elements]
    assert len(elements) == self.natypes, 'Number of elements does not match number of atom types'

    # Default to reporting positions in box-scaled units
    prop_units = kwargs.get('prop_units', {})
    if 'pos' not in prop_units:
        prop_units['pos'] = 'scaled'

    # Extract box parameters
    a = self.box.a
    b = self.box.b
    c = self.box.c
    alpha = self.box.alpha
    beta = self.box.beta
    gamma = self.box.gamma

    # Lattice-constant errors are only reported if all three are supplied
    if 'a_std' in kwargs and 'b_std' in kwargs and 'c_std' in kwargs:
        errors = True
        a_std = kwargs['a_std']
        b_std = kwargs['b_std']
        c_std = kwargs['c_std']
    else:
        errors = False

    model = DM()
    model['cell'] = cell = DM()

    # Test for orthorhombic angles
    if np.allclose([alpha, beta, gamma], [90.0, 90.0, 90.0]):
        if np.isclose(b/a, 1.):
            if np.isclose(c/a, 1.):
                # For cubic (a = b = c): report the averaged lattice constant
                c_family = 'cubic'
                cell[c_family] = DM()
                cell[c_family]['a'] = DM()
                a_ave = (a + b + c) / 3
                cell[c_family]['a']['value'] = uc.get_in_units(a_ave, box_unit)
                if errors is True:
                    a_std_ave = (a_std + b_std + c_std) / 3
                    cell[c_family]['a']['error'] = uc.get_in_units(a_std_ave, box_unit)
                cell[c_family]['a']['unit'] = box_unit

            else:
                # For tetragonal (a = b != c): average a and b
                c_family = 'tetragonal'
                cell[c_family] = DM()
                cell[c_family]['a'] = DM()
                cell[c_family]['c'] = DM()
                a_ave = (a + b) / 2
                cell[c_family]['a']['value'] = uc.get_in_units(a_ave, box_unit)
                cell[c_family]['c']['value'] = uc.get_in_units(c, box_unit)
                if errors is True:
                    a_std_ave = (a_std + b_std) / 2
                    cell[c_family]['a']['error'] = uc.get_in_units(a_std_ave, box_unit)
                    cell[c_family]['c']['error'] = uc.get_in_units(c_std, box_unit)
                cell[c_family]['a']['unit'] = box_unit
                cell[c_family]['c']['unit'] = box_unit

        else:
            # For orthorhombic (a != b != c): report all three constants
            c_family = 'orthorhombic'
            cell[c_family] = DM()
            cell[c_family]['a'] = DM()
            cell[c_family]['b'] = DM()
            cell[c_family]['c'] = DM()
            cell[c_family]['a']['value'] = uc.get_in_units(a, box_unit)
            cell[c_family]['b']['value'] = uc.get_in_units(b, box_unit)
            cell[c_family]['c']['value'] = uc.get_in_units(c, box_unit)
            if errors is True:
                cell[c_family]['a']['error'] = uc.get_in_units(a_std, box_unit)
                cell[c_family]['b']['error'] = uc.get_in_units(b_std, box_unit)
                cell[c_family]['c']['error'] = uc.get_in_units(c_std, box_unit)
            cell[c_family]['a']['unit'] = box_unit
            cell[c_family]['b']['unit'] = box_unit
            cell[c_family]['c']['unit'] = box_unit

    else:
        # Non-orthogonal boxes are not yet supported
        # (note: 'comming' typo is in the original runtime message)
        raise ValueError('Non-orthogonal boxes comming')

    # Build per-atom entries
    for i in xrange(self.natoms):
        atom = DM()

        # Atom type, symbol and element tag
        atom['component'] = int(self.atoms_prop(a_id=i, key='atype'))
        symbol = symbols[self.atoms_prop(a_id=i, key='atype')-1]
        if symbol is not None:
            atom['symbol'] = symbol
        element = elements[self.atoms_prop(a_id=i, key='atype')-1]
        if element is not None:
            atom['element'] = element

        # Position, either box-scaled or converted to the requested unit
        atom['position'] = DM()
        if prop_units['pos'] == 'scaled':
            atom['position']['value'] = list(self.atoms_prop(a_id=i, key='pos', scale=True))
        else:
            atom['position']['value'] = list(uc.get_in_units(self.atoms_prop(a_id=i, key='pos'), prop_units['pos']))
        atom['position']['unit'] = prop_units['pos']

        # Any additional requested per-atom properties
        for key, unit in prop_units.iteritems():
            if key != 'pos' and key != 'atype':
                value = uc.get_in_units(self.atoms_prop(a_id=i, key=key), unit)
                # Listify multi-valued properties; scalars stay as-is
                try:
                    value = list(value)
                except:
                    pass
                prop = DM([('name', key), ('value', value), ('unit', unit)])
                atom.append('property', prop)

        model.append('atom', atom)

    return DM([('atomic-system', model)])
def main():
    """
    Legacy runner loop: repeatedly bids on calculation folders in a fixed
    to-run directory, validates their contents, executes the calc script and
    archives results into an xml library.  Invalid folders are archived to an
    orphan directory.
    """
    to_run_dir = 'C:/users/lmh1/Documents/iprPy_run/to_run'
    xml_dir = 'C:/users/lmh1/Documents/iprPy_run/xml_library'
    orphan_dir = os.path.join(xml_dir, 'orphan')

    os.chdir(to_run_dir)
    flist = os.listdir(to_run_dir)
    while len(flist) > 0:
        # Pick a random calculation folder and try to claim it
        index = random.randint(0, len(flist)-1)
        sim = flist[index]
        if bid(sim):
            os.chdir(sim)
            try:
                calc_py = None
                calc_in = None
                calc_name = None
                pot_name = None

                # find calc_*.py and calc_*.in files, and the potential id
                # from any LAMMPS-potential data model present
                for fname in os.listdir(os.getcwd()):
                    if fname[:5] == 'calc_':
                        if fname[-3:] == '.py':
                            if calc_py is None:
                                calc_py = fname
                                calc_name = fname[5:-3]
                            else:
                                raise ValueError('folder has multiple calc_*.py scripts')
                        elif fname[-3:] == '.in':
                            if calc_in is None:
                                calc_in = fname
                            else:
                                raise ValueError('folder has multiple calc_*.in scripts')
                    elif fname[-5:] == '.json' or fname[-4:] == '.xml':
                        try:
                            with open(fname) as f:
                                test = DataModelDict(f)
                            pot_name = test.find('LAMMPS-potential')['potential']['id']
                        except:
                            pass

                assert pot_name is not None, 'LAMMPS-potential data model not found'
                assert calc_py is not None, 'calc_*.py script not found'
                # Fix: original re-checked calc_py here, so a missing
                # calc_*.in file passed validation.
                assert calc_in is not None, 'calc_*.in script not found'

            # Invalid folder: report, archive to orphan_dir and move on
            except:
                print(sim, sys.exc_info()[1])
                os.chdir(to_run_dir)
                if not os.path.isdir(orphan_dir):
                    os.makedirs(orphan_dir)
                shutil.make_archive(os.path.join(orphan_dir, sim), 'gztar', root_dir=to_run_dir, base_dir=sim)
                shutil.rmtree(os.path.join(to_run_dir, sim))
                flist = os.listdir(to_run_dir)
                continue

            pot_xml_dir = os.path.join(xml_dir, pot_name, calc_name, 'standard')

            # Run the calculation script; any stderr output is treated as
            # a failure
            try:
                run = subprocess.Popen(['python', calc_py, calc_in, sim], stderr=subprocess.PIPE)
                err_mess = run.stderr.read()
                if err_mess != '':
                    raise RuntimeError(err_mess)

            # On failure, build results.json from the library record with the
            # error message attached
            except:
                with open(os.path.join(pot_xml_dir, sim + '.xml')) as f:
                    model = DataModelDict(f)
                # Fix: keys()[0] is Python-2 only; list() works on both
                key = list(model.keys())[0]
                model[key]['error'] = str(sys.exc_info()[1])
                with open('results.json', 'w') as f:
                    model.json(fp=f, indent=4)

            # Copy results.json back to the xml library as xml
            with open('results.json') as f:
                model = DataModelDict(f)
            with open(os.path.join(pot_xml_dir, sim + '.xml'), 'w') as f:
                model.xml(fp=f, indent=4)

            # Archive the completed folder next to its library record
            os.chdir(to_run_dir)
            shutil.make_archive(os.path.join(pot_xml_dir, sim), 'gztar', root_dir=to_run_dir, base_dir=sim)
            shutil.rmtree(os.path.join(to_run_dir, sim))

        # Refresh the running list of calculation folders
        flist = os.listdir(to_run_dir)
def dump(system, **kwargs):
    """
    Return a DataModelDict 'cell' representation of the system.

    Parameters
    ----------
    system : atomman.System
        The system to generate the data model for.
    f : str or file-like object, optional
        File path or file-like object to write the content to.  If not given,
        then the content is returned as a DataModelDict.
    format : str, optional
        File format 'xml' or 'json' to save the content as if f is given.  If
        f is a filename, then the format will be automatically inferred from
        f's extension.  If format is not given and cannot be inferred, then it
        will be set to 'json'.
    indent : int or None, optional
        Indentation option to use for XML/JSON content if f is given.  A value
        of None (default) will add no line separatations or indentations.
    box_unit : str, optional
        Length unit to use for the box.  Default value is 'angstrom'.
    symbols : list, optional
        list of atom-model symbols corresponding to the atom types.  If not
        given, will use system.symbols.
    elements : list, optional
        list of element tags corresponding to the atom types.
    prop_units : dict, optional
        dictionary where the keys are the property keys to include, and the
        values are units to use.  If not given, only the positions in scaled
        units are included.
    a_std : float, optional
        Standard deviation of a lattice constant to include if available.
    b_std : float, optional
        Standard deviation of b lattice constant to include if available.
    c_std : float, optional
        Standard deviation of c lattice constant to include if available.

    Returns
    -------
    DataModelDict
        A 'cell' data model of the system.
    """
    # Set default values
    box_unit = kwargs.get('box_unit', 'angstrom')
    indent = kwargs.get('indent', None)

    # Normalize symbols to a list of length natypes
    symbols = kwargs.get('symbols', system.symbols)
    if isinstance(symbols, stringtype):
        symbols = [symbols]
    assert len(symbols) == system.natypes, 'Number of symbols does not match number of atom types'

    # Normalize elements to a list of length natypes
    elements = kwargs.get('elements', [None for i in range(system.natypes)])
    if not isinstance(elements, list):
        elements = [elements]
    assert len(elements) == system.natypes, 'Number of elements does not match number of atom types'

    # Default to reporting positions in box-scaled units
    prop_units = kwargs.get('prop_units', {})
    if 'pos' not in prop_units:
        prop_units['pos'] = 'scaled'

    # Extract system values
    a = system.box.a
    b = system.box.b
    c = system.box.c
    alpha = system.box.alpha
    beta = system.box.beta
    gamma = system.box.gamma

    # Check for box standard deviations (all three must be given)
    if 'a_std' in kwargs and 'b_std' in kwargs and 'c_std' in kwargs:
        errors = True
        a_std = kwargs['a_std']
        b_std = kwargs['b_std']
        c_std = kwargs['c_std']
    else:
        errors = False
        a_std = None
        b_std = None
        c_std = None

    # Initialize DataModelDict
    model = DM()
    model['cell'] = cell = DM()

    # Test crystal family; fall back to triclinic when unidentified
    c_family = identifyfamily(system.box)
    if c_family is None:
        c_family = 'triclinic'
    cell[c_family] = DM()

    # Report only the independent lattice parameters for the family,
    # averaging symmetry-equivalent values (and their errors)
    if c_family == 'cubic':
        a_ave = (a + b + c) / 3
        if errors is True:
            a_std_ave = (a_std + b_std + c_std) / 3
        else:
            a_std_ave = None
        cell[c_family]['a'] = uc.model(a_ave, box_unit, error=a_std_ave)

    elif c_family == 'tetragonal':
        a_ave = (a + b) / 2
        if errors is True:
            a_std_ave = (a_std + b_std) / 2
        else:
            a_std_ave = None
        cell[c_family]['a'] = uc.model(a_ave, box_unit, error=a_std_ave)
        cell[c_family]['c'] = uc.model(c, box_unit, error=c_std)

    elif c_family == 'orthorhombic':
        cell[c_family]['a'] = uc.model(a, box_unit, error=a_std)
        cell[c_family]['b'] = uc.model(b, box_unit, error=b_std)
        cell[c_family]['c'] = uc.model(c, box_unit, error=c_std)

    elif c_family == 'hexagonal':
        a_ave = (a + b) / 2
        if errors is True:
            a_std_ave = (a_std + b_std) / 2
        else:
            a_std_ave = None
        cell[c_family]['a'] = uc.model(a_ave, box_unit, error=a_std_ave)
        cell[c_family]['c'] = uc.model(c, box_unit, error=c_std)

    elif c_family == 'rhombohedral':
        a_ave = (a + b + c) / 3
        alpha_ave = (alpha + beta + gamma) / 3
        if errors is True:
            a_std_ave = (a_std + b_std + c_std) / 3
        else:
            a_std_ave = None
        cell[c_family]['a'] = uc.model(a_ave, box_unit, error=a_std_ave)
        cell[c_family]['alpha'] = alpha_ave

    elif c_family == 'monoclinic':
        cell[c_family]['a'] = uc.model(a, box_unit, error=a_std)
        cell[c_family]['b'] = uc.model(b, box_unit, error=b_std)
        cell[c_family]['c'] = uc.model(c, box_unit, error=c_std)
        cell[c_family]['beta'] = beta

    elif c_family == 'triclinic':
        cell[c_family]['a'] = uc.model(a, box_unit, error=a_std)
        cell[c_family]['b'] = uc.model(b, box_unit, error=b_std)
        cell[c_family]['c'] = uc.model(c, box_unit, error=c_std)
        cell[c_family]['alpha'] = alpha
        cell[c_family]['beta'] = beta
        cell[c_family]['gamma'] = gamma

    else:
        raise ValueError('Unknown crystal family')

    atype = system.atoms.atype
    aindex = atype - 1

    # Build list of atoms and per-atom properties
    for i in range(system.natoms):
        atom = DM()

        atom['component'] = int(atype[i])

        symbol = symbols[aindex[i]]
        if symbol is not None:
            atom['symbol'] = symbol

        element = elements[aindex[i]]
        if element is not None:
            atom['element'] = element

        atom['position'] = DM()
        if prop_units['pos'] == 'scaled':
            atom['position']['value'] = list(system.atoms_prop(a_id=i, key='pos', scale=True))
        else:
            atom['position']['value'] = list(uc.get_in_units(system.atoms.pos[i], prop_units['pos']))
        atom['position']['unit'] = prop_units['pos']

        for key, unit in iteritems(prop_units):
            if key != 'pos' and key != 'atype':
                value = system.atoms.view[key][i]
                prop = DM()
                prop['name'] = key
                prop.update(uc.model(value, unit))
                atom.append('property', prop)

        model.append('atom', atom)
    model = DM([('atomic-system', model)])

    # Return DataModelDict or str
    if 'f' not in kwargs:
        if 'format' not in kwargs:
            return model
        elif kwargs['format'].lower() == 'xml':
            return model.xml(indent=indent)
        elif kwargs['format'].lower() == 'json':
            return model.json(indent=indent)

    # Write to file
    else:
        f = kwargs['f']
        if 'format' not in kwargs:
            # Infer format from a filename extension; file-like objects have
            # no usable extension (splitext raises TypeError on them).
            # Fix: an extensionless path produced fmt == '' and the file was
            # silently written empty -- default to 'json' as documented.
            # (Also renamed 'format' -> 'fmt' to avoid shadowing the builtin.)
            try:
                fmt = os.path.splitext(f)[1][1:]
            except:
                fmt = 'json'
            if fmt == '':
                fmt = 'json'
        else:
            fmt = kwargs['format']

        if hasattr(f, 'write'):
            if fmt.lower() == 'xml':
                return model.xml(fp=f, indent=indent)
            elif fmt.lower() == 'json':
                return model.json(fp=f, indent=indent)
        else:
            with open(f, 'w') as fp:
                if fmt.lower() == 'xml':
                    return model.xml(fp=fp, indent=indent)
                elif fmt.lower() == 'json':
                    return model.json(fp=fp, indent=indent)

    return
def model(self, model=None, length_unit='angstrom', energyperarea_unit='eV/angstrom^2'): """ Return or set DataModelDict representation of the gamma surface. Parameters ---------- model : str, file-like object or DataModelDict, optional XML/JSON content to extract gamma surface energy from. If not given, model content will be generated. length_unit : str, optional Units to report delta displacement values in when a new model is generated. Default value is 'angstrom'. energyperarea_unit : str, optional Units to report fault energy values in when a new model is generated. Default value is 'mJ/m^2'. Returns ------- DataModelDict A dictionary containing the stacking fault data of the GammaSurface object. Returned if model is not given. """ # Set values if model given if model is not None: model = DM(model).find('stacking-fault-map') # Read in box, a1vect and a2vect box = Box(avect = model['box']['avect'], bvect = model['box']['bvect'], cvect = model['box']['cvect']) a1vect = model['shift-vector-1'] a2vect = model['shift-vector-2'] # Read in stacking fault data gsf = model.find('stacking-fault-relation') a1 = gsf['shift-vector-1-fraction'] a2 = gsf['shift-vector-2-fraction'] E_gsf = uc.value_unit(gsf['energy']) try: delta = uc.value_unit(gsf['plane-separation']) except: delta = None self.set(a1vect, a2vect, a1, a2, E_gsf, box=box, delta=delta) # Generate model else: model = DM() model['stacking-fault-map'] = sfm = DM() sfm['box'] = DM() sfm['box']['avect'] = list(self.box.avect) sfm['box']['bvect'] = list(self.box.bvect) sfm['box']['cvect'] = list(self.box.cvect) sfm['shift-vector-1'] = list(self.a1vect) sfm['shift-vector-2'] = list(self.a2vect) sfm['stacking-fault-relation'] = sfr = DM() sfr['shift-vector-1-fraction'] = list(self.data.a1) sfr['shift-vector-2-fraction'] = list(self.data.a2) sfr['energy'] = uc.model(self.data.E_gsf, energyperarea_unit) if 'delta' in self.data: sfr['plane-separation'] = uc.model(self.data.delta, length_unit) return model
def load(data, prop_info=None):
    """
    Read a LAMMPS-style dump file and return a System.

    Argument:
    data = file name, file-like object or string to read data from.

    Keyword Argument:
    prop_info -- DataModelDict for relating the per-atom properties to/from
                 the dump file and the System.  Will create a default json
                 instance <data>.json if prop_info is not given and
                 <data>.json doesn't already exist.
    """
    # NOTE(review): this function uses Python-2 constructs
    # (long, unicode, xrange, iteritems).

    # read in prop_info if supplied
    if prop_info is not None:
        if isinstance(prop_info, (str, unicode)) and os.path.isfile(prop_info):
            with open(prop_info) as f:
                prop_info = f.read()
        prop_info = DataModelDict(prop_info)

    # check for default prop_info file
    else:
        try:
            with open(data+'.json') as fj:
                prop_info = DataModelDict(fj)
        except:
            prop_info = None

    box_unit = None
    # read box_unit if specified in prop_info
    if prop_info is not None:
        prop_info = prop_info.find('LAMMPS-dump-atoms_prop-relate')
        box_unit = prop_info['box_prop'].get('unit', None)

    with uber_open_rmode(data) as f:
        # Parser state: which section of the dump file we are inside.
        # bcount counts box-bounds lines read (3 means "not reading bounds").
        pbc = None
        box = None
        natoms = None
        system = None
        readnatoms = False
        readatoms = False
        readtimestep = False
        acount = 0
        bcount = 3

        # loop over all lines in file
        for line in f:
            terms = line.split()
            if len(terms) > 0:

                # read atomic values if time to do so
                if readatoms:
                    # sort values by a_id and save to prop_vals
                    a_id = long(terms[id_index]) - 1
                    prop_vals[a_id] = terms
                    acount += 1

                    # save values to sys once all atoms read in
                    if acount == natoms:
                        readatoms = False

                        # cycle over the defined atoms_prop in prop_info
                        for prop, p_keys in prop_info['atoms_prop'].iteritems():
                            # set default keys
                            dtype = p_keys.get('dtype', None)
                            shape = p_keys.get('shape', None)
                            shape = (natoms,) + np.empty(shape).shape
                            value = np.empty(shape)

                            # cycle over the defined LAMMPS-attributes in prop_info
                            for attr, a_keys in prop_info['LAMMPS-attribute'].iteritems():

                                # cycle over list of relations for each LAMMPS-attribute
                                for relation in a_keys.iteraslist('relation'):

                                    # if atoms prop and relation prop match
                                    if relation['prop'] == prop:
                                        # get unit and scale info ('scaled'
                                        # means box-scaled, not a real unit)
                                        unit = relation.get('unit', None)
                                        if unit == 'scaled':
                                            unit = None
                                            scale = True
                                        else:
                                            scale = False

                                        # find index of attribute in name_list
                                        a_index = name_list.index(attr)

                                        # check if relation has index listed
                                        try:
                                            index = relation['index']
                                            if isinstance(index, list):
                                                index = (Ellipsis,) + tuple(index)
                                            else:
                                                index = (Ellipsis,) + (index,)
                                            value[index] = prop_vals[:, a_index]
                                        # scalar if no index
                                        except:
                                            value[:] = prop_vals[:, a_index]

                            # test if values are ints if dtype not specified
                            if dtype is None and np.allclose(np.asarray(value, dtype=int), value):
                                value = np.asarray(value, dtype=int)
                            else:
                                value = np.asarray(value, dtype=dtype)

                            # save prop values to system
                            system.atoms_prop(key=prop, value=uc.set_in_units(value, unit), scale=scale)

                # read number of atoms if time to do so
                elif readnatoms:
                    natoms = int(terms[0])
                    readnatoms = False

                # read timestep if time to do so
                elif readtimestep:
                    timestep = int(terms[0])
                    readtimestep = False

                # read x boundary condition values if time to do so
                elif bcount == 0:
                    xlo = uc.set_in_units(float(terms[0]), box_unit)
                    xhi = uc.set_in_units(float(terms[1]), box_unit)
                    if len(terms) == 3:
                        # third value on a bounds line is a tilt factor
                        xy = uc.set_in_units(float(terms[2]), box_unit)
                    bcount += 1

                # read y boundary condition values if time to do so
                elif bcount == 1:
                    ylo = uc.set_in_units(float(terms[0]), box_unit)
                    yhi = uc.set_in_units(float(terms[1]), box_unit)
                    if len(terms) == 3:
                        xz = uc.set_in_units(float(terms[2]), box_unit)
                    bcount += 1

                # read z boundary condition values if time to do so
                elif bcount == 2:
                    zlo = uc.set_in_units(float(terms[0]), box_unit)
                    zhi = uc.set_in_units(float(terms[1]), box_unit)
                    if len(terms) == 3:
                        # Triclinic: undo LAMMPS' bounding-box adjustment to
                        # recover the true box edges, then build a tilted Box
                        yz = uc.set_in_units(float(terms[2]), box_unit)
                        xlo = xlo - min((0.0, xy, xz, xy + xz))
                        xhi = xhi - max((0.0, xy, xz, xy + xz))
                        ylo = ylo - min((0.0, yz))
                        yhi = yhi - max((0.0, yz))
                        box = am.Box(xlo=xlo, xhi=xhi, ylo=ylo, yhi=yhi, zlo=zlo, zhi=zhi, xy=xy, xz=xz, yz=yz)
                    else:
                        box = am.Box(xlo=xlo, xhi=xhi, ylo=ylo, yhi=yhi, zlo=zlo, zhi=zhi)
                    bcount += 1

                # if not time to read value, check the ITEM: header information
                else:
                    # only consider ITEM: lines
                    if terms[0] == 'ITEM:':

                        # ITEM: TIMESTEP indicates it is time to read the timestep
                        if terms[1] == 'TIMESTEP':
                            readtimestep = True

                        # ITEM: NUMBER indicates it is time to read natoms
                        elif terms[1] == 'NUMBER':
                            readnatoms = True

                        # ITEM: BOX gives pbc and indicates it is time to read box parameters
                        elif terms[1] == 'BOX':
                            pbc = [True, True, True]
                            # last three terms are the per-axis boundary flags
                            for i in xrange(3):
                                if terms[i + len(terms) - 3] != 'pp':
                                    pbc[i] = False
                            bcount = 0

                        # ITEM: ATOMS gives list of per-Atom property names and
                        # indicates it is time to read atomic values
                        elif terms[1] == 'ATOMS':
                            assert box is not None, 'Box information not found'
                            assert natoms is not None, 'Number of atoms not found'

                            # read list of property names
                            name_list = terms[2:]
                            id_index = name_list.index('id')

                            # create empty array for reading property values
                            prop_vals = np.empty((natoms, len(name_list)))

                            # create and save default prop_info Data Model if needed
                            if prop_info is None:
                                prop_info = __prop_info_default_load(name_list)
                                # only write <data>.json for short filename
                                # inputs (not raw content strings)
                                if isinstance(data, (str, unicode)) and len(data) < 80:
                                    with open(data+'.json', 'w') as fj:
                                        prop_info.json(fp=fj, indent=4)
                                prop_info = prop_info.find('LAMMPS-dump-atoms_prop-relate')

                            # create system and flag that it is time to read data
                            # NOTE(review): 'timestep' is unbound here if the
                            # dump lacks an ITEM: TIMESTEP header -- confirm
                            # whether that input is possible.
                            system = am.System(atoms=am.Atoms(natoms=natoms), box=box, pbc=pbc)
                            system.prop['timestep'] = timestep
                            readatoms = True

    if system is None:
        raise ValueError('Failed to properly load dump file '+str(data)[:50])

    return system
def dump(system, fname, prop_info=None, xf='%.13e'):
    """
    Write a LAMMPS-style dump file from a System.

    Arguments:
    system -- System to write to the dump file.
    fname -- name (and location) of file to save data to.

    Keyword Arguments:
    prop_info -- DataModelDict for relating the per-atom properties to/from
                 the dump file and the System. Will create a default json
                 instance <fname>.json if prop_info is not given and
                 <fname>.json doesn't already exist.  May also be given as a
                 path to a file containing the model.
    xf -- c-style format for printing the floating point numbers. Default
          is '%.13e'.
    """

    # create or read prop_info Data Model
    if prop_info is None:
        # Try to reuse a previously generated <fname>.json model; on any
        # failure (e.g. file missing), build a default model from the
        # system and save it for next time.
        try:
            with open(fname+'.json') as fj:
                prop_info = DataModelDict(fj)
        except:
            prop_info = __prop_info_default_dump(system)
            with open(fname+'.json', 'w') as fj:
                prop_info.json(fp=fj, indent=4)
    else:
        # prop_info may be a file path, file contents, or a model object;
        # DataModelDict() normalizes the latter two forms.
        if os.path.isfile(prop_info):
            with open(prop_info) as f:
                prop_info = f.read()
        prop_info = DataModelDict(prop_info)

    # read box_unit if specified in prop_info
    prop_info = prop_info.find('LAMMPS-dump-atoms_prop-relate')
    box_unit = prop_info['box_prop'].get('unit', None)

    # open fname
    with open(fname, 'w') as f:

        # write timestep info (defaults to 0 if the system has no
        # 'timestep' property)
        f.write('ITEM: TIMESTEP\n')
        try:
            f.write('%i\n'%system.prop['timestep'])
        except:
            f.write('0\n')

        # write number of atoms
        f.write('ITEM: NUMBER OF ATOMS\n')
        f.write('%i\n' % ( system.natoms ))

        # write system boundary info for an orthogonal box
        if system.box.xy == 0.0 and system.box.xz == 0.0 and system.box.yz == 0.0:
            f.write('ITEM: BOX BOUNDS')
            # per-dimension boundary flags: 'pp' periodic, 'fm' otherwise
            for i in xrange(3):
                if system.pbc[i]:
                    f.write(' pp')
                else:
                    f.write(' fm')
            f.write('\n')

            f.write('%f %f\n' % ( uc.get_in_units(system.box.xlo, box_unit),
                                  uc.get_in_units(system.box.xhi, box_unit) ))
            f.write('%f %f\n' % ( uc.get_in_units(system.box.ylo, box_unit),
                                  uc.get_in_units(system.box.yhi, box_unit) ))
            f.write('%f %f\n' % ( uc.get_in_units(system.box.zlo, box_unit),
                                  uc.get_in_units(system.box.zhi, box_unit) ))

        # write system boundary info for a triclinic box
        else:
            f.write('ITEM: BOX BOUNDS xy xz yz')
            for i in xrange(3):
                if system.pbc[i]:
                    f.write(' pp')
                else:
                    f.write(' fm')
            f.write('\n')

            # LAMMPS stores the bounding box of the tilted cell, so the
            # tilt factors are folded into the x and y bounds.
            xlo_bound = uc.get_in_units(system.box.xlo, box_unit) + uc.get_in_units(min(( 0.0, system.box.xy, system.box.xz, system.box.xy + system.box.xz)), box_unit)
            xhi_bound = uc.get_in_units(system.box.xhi, box_unit) + uc.get_in_units(max(( 0.0, system.box.xy, system.box.xz, system.box.xy + system.box.xz)), box_unit)
            ylo_bound = uc.get_in_units(system.box.ylo, box_unit) + uc.get_in_units(min(( 0.0, system.box.yz )), box_unit)
            yhi_bound = uc.get_in_units(system.box.yhi, box_unit) + uc.get_in_units(max(( 0.0, system.box.yz )), box_unit)
            zlo_bound = uc.get_in_units(system.box.zlo, box_unit)
            zhi_bound = uc.get_in_units(system.box.zhi, box_unit)

            f.write('%f %f %f\n' % ( xlo_bound, xhi_bound, uc.get_in_units(system.box.xy, box_unit) ))
            f.write('%f %f %f\n' % ( ylo_bound, yhi_bound, uc.get_in_units(system.box.xz, box_unit) ))
            f.write('%f %f %f\n' % ( zlo_bound, zhi_bound, uc.get_in_units(system.box.yz, box_unit) ))

        # write atomic header info and prepare outarray for writing
        header = 'ITEM: ATOMS id'
        print_string = '%i'
        # one column per dump attribute, filled below in header order
        outarray = np.empty((system.natoms, len(prop_info['LAMMPS-attribute'])))
        start = 0
        for attr, a_keys in prop_info['LAMMPS-attribute'].iteritems():

            # get first prop relation for attr
            relation = a_keys.aslist('relation')[0]
            prop = relation.get('prop')
            index = (Ellipsis, ) + tuple(relation.aslist('index'))
            unit = relation.get('unit', None)
            # the special unit value 'scaled' means box-relative
            # coordinates rather than a physical unit conversion
            if unit == 'scaled':
                unit = None
                scale = True
            else:
                scale = False

            # pass values to outarray
            outarray[:,start] = uc.get_in_units(system.atoms_prop(key=prop, scale=scale), unit)[index].reshape((system.natoms))
            start += 1

            # prepare header and print_string (integer props printed with
            # %i, floats with the user-supplied xf format)
            header += ' %s' % attr
            if am.tools.is_dtype_int(system.atoms.dtype[prop]):
                print_string += ' %i'
            else:
                print_string += ' ' + xf

        f.write(header + '\n')
        print_string += '\n'

        # iterate over all atoms (atom ids are 1-based)
        for i in xrange(system.natoms):
            vals = (i+1, ) + tuple(outarray[i])
            f.write(print_string % vals)
def clean_records(self, run_directory=None, record_style=None):
    """
    Resets all records of a given style that issued errors.  Useful if the
    errors are due to external conditions.

    Parameters
    ----------
    run_directory : str, optional
        The directory where the cleaned calculation instances are to be
        returned.
    record_style : str, optional
        The record style to clean.  If not given, then the available record
        styles will be listed and the user prompted to pick one.

    Raises
    ------
    ValueError
        If no run_directory is supplied.
    """
    # Validate run_directory before touching the database.  The previous
    # implementation only raised at the very end, after the tars had
    # already been extracted (tar.extractall(None) dumps into the cwd!)
    # and deleted from the database.
    if run_directory is None:
        raise ValueError('No run_directory supplied')

    if record_style is None:
        record_style = self.select_record_style()

    if record_style is not None:
        # Find all records of record_style that issued errors
        records = self.get_records(style=record_style)
        error_df = []
        error_dict = {}
        for record in records:
            error_df.append(record.todict(full=False))
            error_dict[record.name] = record.content
        error_df = pd.DataFrame(error_df)

        # An empty DataFrame has no 'status' column, so skip filtering
        # (and the loop below) when no records were found.
        if len(error_df) > 0:
            error_df = error_df[error_df.status == 'error']

            # Loop over all error records.
            # NOTE(review): assumes each record's 'key' value matches the
            # record.name used to build error_dict above -- confirm this
            # holds for all record styles.
            for record_name in error_df.key.tolist():

                # Check if record has saved tar
                try:
                    tar = self.get_tar(name=record_name, style=record_style)
                except Exception:
                    # No tar stored for this record; nothing to extract
                    pass
                else:
                    # Copy tar back to run_directory
                    tar.extractall(run_directory)
                    tar.close()

                    # Delete database version of tar (best effort)
                    try:
                        self.delete_tar(name=record_name, style=record_style)
                    except Exception:
                        pass

                # Remove error and status from stored record (best effort)
                try:
                    model = DM(error_dict[record_name])
                except Exception:
                    pass
                else:
                    model_root = list(model.keys())[0]
                    del model[model_root]['error']
                    model[model_root]['status'] = 'not calculated'
                    self.update_record(name=record_name, style=record_style,
                                       content=model.xml())

    # Remove leftover bid (lock) files from run_directory
    for bidfile in glob.iglob(os.path.join(run_directory, '*', '*.bid')):
        os.remove(bidfile)

    # Remove stale results.json files from run_directory
    for resultsfile in glob.iglob(os.path.join(run_directory, '*',
                                               'results.json')):
        os.remove(resultsfile)