def test_fileread(self, tmpdir):
    """uber_open_rmode should open a file path and yield its bytes content."""
    fname = os.path.join(str(tmpdir), 'content.txt')
    with open(fname, 'w') as outfile:
        outfile.write(self.content)
    with uber_open_rmode(fname) as infile:
        data = infile.read()
    assert data.decode('UTF-8') == self.content
def test_objectread(self, tmpdir):
    """uber_open_rmode should pass through an already-open binary file object."""
    # str() for consistency with test_fileread and for compatibility:
    # tmpdir is a py.path.local, which os.path.join only accepts directly
    # on Python versions where it implements the fspath protocol
    contentfile = os.path.join(str(tmpdir), 'content.txt')
    with open(contentfile, 'w') as f:
        f.write(self.content)
    with open(contentfile, 'rb') as openf:
        with uber_open_rmode(openf) as f:
            content = f.read()
    assert content.decode('UTF-8') == self.content
def read(self, log_info, append=True):
    """
    Parses a LAMMPS screen output/log file for thermodynamic data.

    Parameters
    ----------
    log_info : str or file-like object
        File name, string content, or open file of LAMMPS screen
        output/log data (anything uber_open_rmode accepts).
    append : bool, optional
        If False, any previously read simulations are discarded before
        parsing (default is True).
    """
    #Remove existing data if append is False
    if append is False:
        self.__simulations = []

    #Handle file names, strings and open file-like objects equivalently
    with uber_open_rmode(log_info) as log_info:
        headers = []
        footers = []
        i = 0

        #for all lines in file/output
        for line in log_info:
            #skip blank lines
            if len(line.split()) == 0:
                continue

            #This is listed before both run and minimize simulations
            if 'Memory usage per processor =' in line:
                headers.append(i + 1)

            #This follows both run and minimize simulations
            elif 'Loop time of' in line:
                footers.append(i - 1)
            i += 1

        #Add last line to footers for incomplete logs
        footers.append(i)

        #Reset file pointer
        log_info.seek(0)

        #for all lines in file/output
        for header, footer in zip(headers, footers):
            #Initialize simulation data dictionary
            sim = {}

            #Read thermo data and reset file pointer.
            #sep is a regex: raw string avoids the invalid '\s' escape warning
            sim['thermo'] = pd.read_csv(log_info, header=header,
                                        nrows=footer - header,
                                        sep=r'\s+', engine='python',
                                        skip_blank_lines=True)
            log_info.seek(0)

            #Append simulation results
            self.__simulations.append(sim)
def load(poscar):
    """
    Reads a poscar-style coordination file for a system.

    Parameters
    ----------
    poscar : str or file-like object
        File name, string content, or open file of the POSCAR data
        (anything uber_open_rmode accepts).

    Returns
    -------
    tuple
        (atomman.System, list of elements) — element entries are None if
        the file does not list element symbols.
    """
    #Read in all lines of the file
    with uber_open_rmode(poscar) as f:
        lines = f.read().split('\n')

    #Interpret box information: line 1 is a universal scale factor,
    #lines 2-4 are the lattice vectors
    box_scale = float(lines[1])
    avect = np.array(lines[2].split(), dtype='float64') * box_scale
    bvect = np.array(lines[3].split(), dtype='float64') * box_scale
    cvect = np.array(lines[4].split(), dtype='float64') * box_scale
    box = am.Box(avect=avect, bvect=bvect, cvect=cvect)

    #Read in elements, number of types, and style info.
    #EAFP format detection: line 5 holds the type counts unless the
    #optional element-symbols line is present, in which case parsing it
    #as ints raises ValueError (narrowed from the old bare except;
    #IndexError covers truncated files)
    try:
        typenums = np.array(lines[5].split(), dtype='int32')
        elements = [None for n in range(len(typenums))]
        style = lines[6]
        start_i = 7
    except (ValueError, IndexError):
        elements = lines[5].split()
        typenums = np.array(lines[6].split(), dtype='int32')
        style = lines[7]
        start_i = 8

    #Build atype list: type i+1 repeated typenums[i] times
    atype = np.array([], dtype='int32')
    for i in range(len(typenums)):
        atype = np.hstack((atype, np.full(typenums[i], i + 1, dtype='int32')))

    #Check which coordinate style to use: Cartesian/Kartesian values are
    #absolute, anything else (Direct) is in scaled box coordinates
    if style[0] in 'cCkK':
        scale = False
    else:
        scale = True

    #Read in positions from the remaining non-blank lines
    natoms = np.sum(typenums)
    pos = np.empty((natoms, 3), dtype='float64')
    count = 0
    for i in range(start_i, len(lines)):
        terms = lines[i].split()
        if len(terms) > 0:
            pos[count, :] = np.array(terms, dtype='float64')
            count += 1

    atoms = am.Atoms(natoms=natoms, prop={'atype': atype, 'pos': pos})
    system = am.System(atoms=atoms, box=box, scale=scale)

    return system, elements
def load(poscar):
    """
    Reads a poscar-style coordination file for a system.
    Returns an atomman.System, and a list of elements if the file gives them.
    """
    #Slurp the entire file into a list of lines
    with uber_open_rmode(poscar) as infile:
        flines = infile.read().split('\n')

    #Box: universal scale factor on line 1, lattice vectors on lines 2-4
    scale_factor = float(flines[1])
    box = am.Box(avect=np.array(flines[2].split(), dtype='float64') * scale_factor,
                 bvect=np.array(flines[3].split(), dtype='float64') * scale_factor,
                 cvect=np.array(flines[4].split(), dtype='float64') * scale_factor)

    #The element-symbols line is optional: try type counts on line 5 first
    try:
        typenums = np.array(flines[5].split(), dtype='int32')
        elements = [None] * len(typenums)
        style = flines[6]
        start_i = 7
    except:
        elements = flines[5].split()
        typenums = np.array(flines[6].split(), dtype='int32')
        style = flines[7]
        start_i = 8

    #Atom types: type t+1 repeated typenums[t] times
    atype = np.array([], dtype='int32')
    for t in xrange(len(typenums)):
        atype = np.hstack((atype, np.full(typenums[t], t + 1, dtype='int32')))

    #Cartesian ('c'/'k') coordinates are absolute; Direct are box-scaled
    scale = style[0] not in 'cCkK'

    #Atomic positions fill the remaining non-blank lines
    natoms = np.sum(typenums)
    pos = np.empty((natoms, 3), dtype='float64')
    filled = 0
    for li in xrange(start_i, len(flines)):
        words = flines[li].split()
        if len(words) > 0:
            pos[filled, :] = np.array(words, dtype='float64')
            filled += 1

    system = am.System(atoms=am.Atoms(natoms=natoms,
                                      prop={'atype': atype, 'pos': pos}),
                       box=box, scale=scale)
    return system, elements
def load(self, model, pot_dir=None):
    """
    loads potential-LAMMPS data model info.

    Arguments:
    model -- a string or file-like object of a json/xml data model
             containing a potential-LAMMPS branch.
    pot_dir -- (optional) the directory location of any artifacts
               associated with the potential.
    """
    # Load model and find potential-LAMMPS
    if isinstance(model, DM):
        self.__dm = model.find('potential-LAMMPS')
    else:
        with uber_open_rmode(model) as f:
            self.__dm = DM(f).find('potential-LAMMPS')

    for atom in self.__dm.iteraslist('atom'):
        # If no element is listed, symbol and mass must both be given.
        # (DM is dict-like, so membership tests replace the old
        # assign-and-discard try/except probes with bare excepts.)
        if 'element' not in atom:
            if 'symbol' in atom and 'mass' in atom:
                atom['element'] = atom['symbol']
            else:
                raise KeyError("Error reading Potential's atomic info: mass and symbol are needed if element is not given!")

        # If no symbol is listed, default it to the element
        if 'symbol' not in atom:
            atom['symbol'] = atom['element']

        # If no mass is listed, use the standard value for the element
        if 'mass' not in atom:
            atom['mass'] = atomic_mass(atom['element'])
        assert isinstance(atom['mass'], float), 'Mass needs to be a number!'

    if pot_dir is not None:
        self.pot_dir = pot_dir
    else:
        self.pot_dir = ''
def load(data, pbc=(True, True, True), atom_style='atomic', units='metal'):
    """
    Read a LAMMPS-style atom data file and return a System.

    Argument:
    data = file name, file-like object or string to read data from.

    Keyword Arguments:
    pbc -- list or tuple of three boolean values indicating which System
           directions are periodic. Default is (True, True, True).
    atom_style -- LAMMPS atom_style option associated with the data file.
                  Default is 'atomic'.
    units -- LAMMPS units option associated with the data file. Default is
             'metal'.

    When the file is read in, the units of all property values are
    automatically converted to atomman's set working units.
    """
    # per-dimension unit names for this LAMMPS units option
    units_dict = style.unit(units)
    # readtime flags that the following lines are per-atom value rows
    readtime = False
    count = 0
    # tilt factors default to 0 (orthogonal box) unless the file gives them
    xy = 0.0
    xz = 0.0
    yz = 0.0
    system = None
    with uber_open_rmode(data) as fp:
        #loop over all lines in fp
        for line in fp:
            terms = line.split()
            if len(terms) > 0:
                #read atomic information if time to do so
                if readtime == True:
                    # first column is the 1-based atom id; store row by id
                    a_id = int(terms[0]) - 1
                    prop_vals[a_id] = terms[1:]
                    count += 1
                    #save values to system once all atoms read in
                    if count == natoms:
                        readtime = False
                        count = 0
                        start = 0
                        #iterate over all atom_style properties
                        # NOTE(review): props values look like (size, dim, dtype)
                        # tuples per style.atom()/style.velocity() — confirm
                        for name, v in props.iteritems():
                            if name != 'a_id':
                                size, dim, dtype = v
                                value = np.asarray(prop_vals[:, start:start+size], dtype=dtype)
                                start += size
                                #set units according to LAMMPS units style
                                unit = units_dict.get(dim, None)
                                system.atoms_prop(key=name, value=uc.set_in_units(value, unit))
                #read number of atoms
                elif len(terms) == 2 and terms[1] == 'atoms':
                    natoms = int(terms[0])
                #read number of atom types
                elif len(terms) == 3 and terms[1] == 'atom' and terms[2] == 'types':
                    natypes = int(terms[0])
                #read boundary info
                elif len(terms) == 4 and terms[2] == 'xlo' and terms[3] == 'xhi':
                    xlo = uc.set_in_units(float(terms[0]), units_dict['length'])
                    xhi = uc.set_in_units(float(terms[1]), units_dict['length'])
                elif len(terms) == 4 and terms[2] == 'ylo' and terms[3] == 'yhi':
                    ylo = uc.set_in_units(float(terms[0]), units_dict['length'])
                    yhi = uc.set_in_units(float(terms[1]), units_dict['length'])
                elif len(terms) == 4 and terms[2] == 'zlo' and terms[3] == 'zhi':
                    zlo = uc.set_in_units(float(terms[0]), units_dict['length'])
                    zhi = uc.set_in_units(float(terms[1]), units_dict['length'])
                # triclinic tilt factors line: 'xy xz yz'
                elif len(terms) == 6 and terms[3] == 'xy' and terms[4] == 'xz' and terms[5] == 'yz':
                    xy = uc.set_in_units(float(terms[0]), units_dict['length'])
                    xz = uc.set_in_units(float(terms[1]), units_dict['length'])
                    yz = uc.set_in_units(float(terms[2]), units_dict['length'])
                #Flag when reached data and setup for reading
                elif len(terms) == 1 and terms[0] in ('Atoms', 'Velocities'):
                    #create system if not already
                    # (box dimensions must all have been read by this point)
                    if system is None:
                        box = am.Box(xlo=xlo, xhi=xhi,
                                     ylo=ylo, yhi=yhi,
                                     zlo=zlo, zhi=zhi,
                                     xy=xy, xz=xz, yz=yz)
                        system = am.System(box=box, atoms=am.Atoms(natoms=natoms), pbc = pbc)
                    if terms[0] == 'Atoms':
                        props = style.atom(atom_style)
                    else:
                        props = style.velocity(atom_style)
                    # total columns per row; -1 drops the a_id column
                    # NOTE(review): presumes the a_id property has size 1 — confirm
                    nvals = 0
                    for name, v in props.iteritems():
                        nvals += v[0]
                    prop_vals = np.empty((natoms, nvals-1), dtype=float)
                    readtime = True
    assert system.natypes == natypes, 'Number of atom types does not match!'
    return system
def read(self, log_info, append=True):
    """
    Parses a LAMMPS screen output/log file for thermodynamic data.

    Parameters
    ----------
    log_info : str or file-like object
        File name, string content, or open file of LAMMPS screen
        output/log data (anything uber_open_rmode accepts).
    append : bool, optional
        If False, previously read simulations and version info are
        discarded before parsing (default is True).
    """
    #Reset properties and values if append is False
    if append is False:
        self.__simulations = []
        self.__lammps_version = None
        self.__lammps_date = None

    #Strings found before run and minimize simulations
    sim_trigger = ['Memory usage per processor =',
                   'Per MPI rank memory allocation (min/avg/max) =']

    #Handle file names, strings and open file-like objects equivalently
    with uber_open_rmode(log_info) as log_info:
        headers = []
        footers = []
        i = 0

        #for all lines in file/output
        for line in log_info:
            #skip blank lines
            if len(line.split()) == 0:
                continue

            #Save the LAMMPS version information
            if line[:8] == 'LAMMPS (' and self.lammps_version is None:
                month = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4,
                         'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8,
                         'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
                self.__lammps_version = line.strip()[8:-1]
                #Version strings look like 'DD Mon YYYY[-suffix]'
                d = self.lammps_version.split('-')[0].split()
                self.__lammps_date = datetime.date(int(d[2]), month[d[1]], int(d[0]))

            #Check for strings listed before run and minimize simulations
            if any(trigger in line for trigger in sim_trigger):
                headers.append(i + 1)

            #This follows both run and minimize simulations
            elif 'Loop time of' in line:
                footers.append(i - 1)
            i += 1

        #Add last line to footers for incomplete logs
        footers.append(i)

        #Reset file pointer
        log_info.seek(0)

        #for all lines in file/output
        for header, footer in zip(headers, footers):
            #Initialize simulation data dictionary
            sim = {}

            #Read thermo data and reset file pointer.
            #sep is a regex: raw string avoids the invalid '\s' escape warning
            sim['thermo'] = pd.read_csv(log_info, header=header,
                                        nrows=footer - header,
                                        sep=r'\s+', engine='python',
                                        skip_blank_lines=True)
            log_info.seek(0)

            #Append simulation results
            self.__simulations.append(sim)
def test_stringread(self):
    """uber_open_rmode should wrap raw string content directly."""
    with uber_open_rmode(self.content) as infile:
        data = infile.read()
    assert data.decode('UTF-8') == self.content
def parse(inscript: Union[str, io.IOBase],
          singularkeys: Optional[List[str]] = None,
          allsingular: bool = False) -> dict:
    """
    Parses an input file and returns a dictionary of parameter terms.

    These are the parsing rules:

    - The first word in a line is taken as the key name of the parameter.
    - All other words are joined together into a single string value for
      the parameter.
    - Words that start with # indicate comments with that word and all
      words to the right of it in the same line being ignored.
    - Any lines with less than two non-comment terms are ignored.  In
      other words, blank lines and lines with keys but not values are
      skipped over.
    - Multiple values can be assigned to the same term by repeating the
      key name on a different line.
    - The keyword arguments can be used to issue an error if multiple
      values are trying to be assigned to terms that should only have a
      single values.

    Parameters
    ----------
    inscript : string or file-like-object
        The file, path to file, or contents of the input script to parse.
    singularkeys : list of str, optional
        List of term keys that should not have multiple values.
    allsingular : bool, optional
        Indicates if all term keys should be singular (Default is False).

    Returns
    -------
    params : dict
        Dictionary of parsed input key-value pairs

    Raises
    ------
    ValueError
        If both singularkeys and allsingular are given, or if multiple
        values found for a singular key.
    """
    # Argument check
    if singularkeys is None:
        singularkeys = []
    singularkeys = aslist(singularkeys)
    if allsingular and len(singularkeys) > 0:
        raise ValueError(
            'allsingular and singularkeys options cannot both be given')

    params = {}

    # Open inscript
    with uber_open_rmode(inscript) as infile:

        # Iterate over all lines in infile
        for line in infile:
            # Lines may arrive as bytes (file opened by uber_open_rmode) or
            # already-decoded str; only the former needs decoding.  Narrowed
            # from a bare except: keep the best-effort behavior without
            # hiding unrelated errors.
            try:
                line = line.decode('utf-8')
            except (AttributeError, UnicodeDecodeError):
                pass

            terms = line.split()

            # Remove comments: truncate terms and line at the first word
            # starting with '#'
            i = 0
            index = len(line)
            while i < len(terms):
                if len(terms[i]) > 0 and terms[i][0] == '#':
                    index = line.index(terms[i])
                    break
                i += 1
            terms = terms[:i]
            line = line[:index]

            # Skip empty, comment, and valueless lines
            if len(terms) > 1:

                # Split into key and value
                key = terms[0]
                value = line.replace(key, '', 1).strip()

                # First time key is called save as is
                if key not in params:
                    params[key] = value

                # Append value to key if not singular
                elif not allsingular and key not in singularkeys:
                    # Append value if parameter is already a list
                    if isinstance(params[key], list):
                        params[key].append(value)
                    # Convert parameter to list if needed and then append value
                    else:
                        params[key] = [params[key]]
                        params[key].append(value)

                # Issue error for trying to append to a singular value
                else:
                    raise ValueError(
                        'multiple values found for singular input parameter '
                        + key)

    return params
def load(data, prop_info=None):
    """
    Read a LAMMPS-style dump file and return a System.

    Argument:
    data = file name, file-like object or string to read data from.

    Keyword Argument:
    prop_info -- DataModelDict for relating the per-atom properties to/from
                 the dump file and the System.  Will create a default json
                 instance <data>.json if prop_info is not given and
                 <data>.json doesn't already exist.
    """
    #read in prop_info if supplied
    if prop_info is not None:
        if isinstance(prop_info, (str, unicode)) and os.path.isfile(prop_info):
            with open(prop_info) as f:
                prop_info = f.read()
        prop_info = DataModelDict(prop_info)

    #check for default prop_info file
    else:
        try:
            with open(data+'.json') as fj:
                prop_info = DataModelDict(fj)
        # best-effort: any failure (not a path, file missing, bad json)
        # falls back to building a default model later
        except:
            prop_info = None

    box_unit = None
    #read box_unit if specified in prop_info
    if prop_info is not None:
        prop_info = prop_info.find('LAMMPS-dump-atoms_prop-relate')
        box_unit = prop_info['box_prop'].get('unit', None)

    with uber_open_rmode(data) as f:
        # parser state flags: read* flag which values the next lines hold,
        # bcount counts box-bounds lines read (3 means "not reading box")
        pbc = None
        box = None
        natoms = None
        system = None
        readnatoms = False
        readatoms = False
        readtimestep = False
        acount = 0
        bcount = 3

        #loop over all lines in file
        for line in f:
            terms = line.split()
            if len(terms) > 0:

                #read atomic values if time to do so
                if readatoms:
                    #sort values by a_id and save to prop_vals
                    a_id = long(terms[id_index]) - 1
                    prop_vals[a_id] = terms
                    acount += 1

                    #save values to sys once all atoms read in
                    if acount == natoms:
                        readatoms = False
                        #cycle over the defined atoms_prop in prop_info
                        for prop, p_keys in prop_info['atoms_prop'].iteritems():
                            #set default keys
                            dtype = p_keys.get('dtype', None)
                            shape = p_keys.get('shape', None)
                            # prepend the atoms axis to the per-atom shape
                            shape = (natoms,) + np.empty(shape).shape
                            value = np.empty(shape)

                            #cycle over the defined LAMMPS-attributes in prop_info
                            for attr, a_keys in prop_info['LAMMPS-attribute'].iteritems():

                                #cycle over list of relations for each LAMMPS-attribute
                                for relation in a_keys.iteraslist('relation'):

                                    #if atoms prop and relation prop match
                                    if relation['prop'] == prop:

                                        #get unit and scale info
                                        # 'scaled' means box-relative coordinates,
                                        # not a physical unit
                                        unit = relation.get('unit', None)
                                        if unit == 'scaled':
                                            unit = None
                                            scale = True
                                        else:
                                            scale = False

                                        #find index of attribute in name_list
                                        a_index = name_list.index(attr)

                                        #check if relation has index listed
                                        try:
                                            index = relation['index']
                                            if isinstance(index, list):
                                                index = (Ellipsis,) + tuple(index)
                                            else:
                                                index = (Ellipsis,) + (index,)
                                            value[index] = prop_vals[:, a_index]
                                        #scalar if no index
                                        except:
                                            value[:] = prop_vals[:, a_index]

                            #test if values are ints if dtype not specified
                            if dtype is None and np.allclose(np.asarray(value, dtype=int), value):
                                value = np.asarray(value, dtype=int)
                            else:
                                value = np.asarray(value, dtype=dtype)

                            #save prop values to system
                            system.atoms_prop(key=prop, value=uc.set_in_units(value, unit), scale=scale)

                #read number of atoms if time to do so
                elif readnatoms:
                    natoms = int(terms[0])
                    readnatoms = False

                # read the timestep value if time to do so
                elif readtimestep:
                    timestep = int(terms[0])
                    readtimestep = False

                #read x boundary condition values if time to do so
                elif bcount == 0:
                    xlo = uc.set_in_units(float(terms[0]), box_unit)
                    xhi = uc.set_in_units(float(terms[1]), box_unit)
                    # third term on a bounds line is a tilt factor (triclinic)
                    if len(terms) == 3:
                        xy = uc.set_in_units(float(terms[2]), box_unit)
                    bcount += 1

                #read y boundary condition values if time to do so
                elif bcount == 1:
                    ylo = uc.set_in_units(float(terms[0]), box_unit)
                    yhi = uc.set_in_units(float(terms[1]), box_unit)
                    if len(terms) == 3:
                        xz = uc.set_in_units(float(terms[2]), box_unit)
                    bcount += 1

                #read z boundary condition values if time to do so
                elif bcount == 2:
                    zlo = uc.set_in_units(float(terms[0]), box_unit)
                    zhi = uc.set_in_units(float(terms[1]), box_unit)
                    if len(terms) == 3:
                        yz = uc.set_in_units(float(terms[2]), box_unit)
                        # convert LAMMPS' tilted bounding-box extents back
                        # to true box lo/hi values before building the Box
                        xlo = xlo - min((0.0, xy, xz, xy + xz))
                        xhi = xhi - max((0.0, xy, xz, xy + xz))
                        ylo = ylo - min((0.0, yz))
                        yhi = yhi - max((0.0, yz))
                        box = am.Box(xlo=xlo, xhi=xhi,
                                     ylo=ylo, yhi=yhi,
                                     zlo=zlo, zhi=zhi,
                                     xy=xy, xz=xz, yz=yz)
                    else:
                        box = am.Box(xlo=xlo, xhi=xhi,
                                     ylo=ylo, yhi=yhi,
                                     zlo=zlo, zhi=zhi)
                    bcount += 1

                #if not time to read value, check the ITEM: header information
                else:
                    #only consider ITEM: lines
                    if terms[0] == 'ITEM:':

                        #ITEM: TIMESTEP indicates it is time to read the timestep
                        if terms[1] == 'TIMESTEP':
                            readtimestep = True

                        #ITEM: NUMBER indicates it is time to read natoms
                        elif terms[1] == 'NUMBER':
                            readnatoms = True

                        #ITEM: BOX gives pbc and indicates it is time to read box parameters
                        # the last three terms are the boundary flags; 'pp' = periodic
                        elif terms[1] == 'BOX':
                            pbc = [True, True, True]
                            for i in xrange(3):
                                if terms[i + len(terms) - 3] != 'pp':
                                    pbc[i] = False
                            bcount = 0

                        #ITEM: ATOMS gives list of per-Atom property names and indicates it is time to read atomic values
                        elif terms[1] == 'ATOMS':
                            assert box is not None, 'Box information not found'
                            assert natoms is not None, 'Number of atoms not found'

                            #read list of property names
                            name_list = terms[2:]
                            id_index = name_list.index('id')

                            #create empty array for reading property values
                            prop_vals = np.empty((natoms, len(name_list)))

                            #create and save default prop_info Data Model if needed
                            # the len(data) < 80 guard skips saving when 'data' is
                            # file content rather than a short file name
                            if prop_info is None:
                                prop_info = __prop_info_default_load(name_list)
                                if isinstance(data, (str, unicode)) and len(data) < 80:
                                    with open(data+'.json', 'w') as fj:
                                        prop_info.json(fp=fj, indent=4)
                                prop_info = prop_info.find('LAMMPS-dump-atoms_prop-relate')

                            #create system and flag that it is time to read data
                            system = am.System(atoms=am.Atoms(natoms=natoms), box=box, pbc=pbc)
                            system.prop['timestep'] = timestep
                            readatoms = True

    if system is None:
        raise ValueError('Failed to properly load dump file '+str(data)[:50])

    return system
def load(data, prop_info=None):
    """
    Read a LAMMPS-style dump file and return a System.

    Argument:
    data = file name, file-like object or string to read data from.

    Keyword Argument:
    prop_info -- DataModelDict for relating the per-atom properties to/from
                 the dump file and the System.  Will create a default json
                 instance <data>.json if prop_info is not given and
                 <data>.json doesn't already exist.
    """
    #read in prop_info if supplied
    if prop_info is not None:
        if isinstance(prop_info, (str, unicode)) and os.path.isfile(prop_info):
            with open(prop_info) as f:
                prop_info = f.read()
        prop_info = DataModelDict(prop_info)

    #check for default prop_info file
    else:
        try:
            with open(data + '.json') as fj:
                prop_info = DataModelDict(fj)
        # best-effort: any failure (not a path, missing file, bad json)
        # falls back to building a default model later
        except:
            prop_info = None

    box_unit = None
    #read box_unit if specified in prop_info
    if prop_info is not None:
        prop_info = prop_info.find('LAMMPS-dump-atoms_prop-relate')
        box_unit = prop_info['box_prop'].get('unit', None)

    with uber_open_rmode(data) as f:
        # parser state: read* flags mark which values the next lines hold,
        # bcount counts box-bounds lines read (3 means "not reading box")
        pbc = None
        box = None
        natoms = None
        system = None
        readnatoms = False
        readatoms = False
        readtimestep = False
        acount = 0
        bcount = 3

        #loop over all lines in file
        for line in f:
            terms = line.split()
            if len(terms) > 0:

                #read atomic values if time to do so
                if readatoms:
                    #sort values by a_id and save to prop_vals
                    a_id = long(terms[id_index]) - 1
                    prop_vals[a_id] = terms
                    acount += 1

                    #save values to sys once all atoms read in
                    if acount == natoms:
                        readatoms = False
                        #cycle over the defined atoms_prop in prop_info
                        for prop, p_keys in prop_info['atoms_prop'].iteritems():
                            #set default keys
                            dtype = p_keys.get('dtype', None)
                            shape = p_keys.get('shape', None)
                            # prepend the atoms axis to the per-atom shape
                            shape = (natoms,) + np.empty(shape).shape
                            value = np.empty(shape)

                            #cycle over the defined LAMMPS-attributes in prop_info
                            for attr, a_keys in prop_info['LAMMPS-attribute'].iteritems():

                                #cycle over list of relations for each LAMMPS-attribute
                                for relation in a_keys.iteraslist('relation'):

                                    #if atoms prop and relation prop match
                                    if relation['prop'] == prop:

                                        #get unit and scale info
                                        # 'scaled' means box-relative coordinates,
                                        # not a physical unit
                                        unit = relation.get('unit', None)
                                        if unit == 'scaled':
                                            unit = None
                                            scale = True
                                        else:
                                            scale = False

                                        #find index of attribute in name_list
                                        a_index = name_list.index(attr)

                                        #check if relation has index listed
                                        try:
                                            index = relation['index']
                                            if isinstance(index, list):
                                                index = (Ellipsis,) + tuple(index)
                                            else:
                                                index = (Ellipsis,) + (index,)
                                            value[index] = prop_vals[:, a_index]
                                        #scalar if no index
                                        except:
                                            value[:] = prop_vals[:, a_index]

                            #test if values are ints if dtype not specified
                            if dtype is None and np.allclose(np.asarray(value, dtype=int), value):
                                value = np.asarray(value, dtype=int)
                            else:
                                value = np.asarray(value, dtype=dtype)

                            #save prop values to system
                            system.atoms_prop(key=prop, value=uc.set_in_units(value, unit), scale=scale)

                #read number of atoms if time to do so
                elif readnatoms:
                    natoms = int(terms[0])
                    readnatoms = False

                # read the timestep value if time to do so
                elif readtimestep:
                    timestep = int(terms[0])
                    readtimestep = False

                #read x boundary condition values if time to do so
                elif bcount == 0:
                    xlo = uc.set_in_units(float(terms[0]), box_unit)
                    xhi = uc.set_in_units(float(terms[1]), box_unit)
                    # a third term on a bounds line is a tilt factor (triclinic)
                    if len(terms) == 3:
                        xy = uc.set_in_units(float(terms[2]), box_unit)
                    bcount += 1

                #read y boundary condition values if time to do so
                elif bcount == 1:
                    ylo = uc.set_in_units(float(terms[0]), box_unit)
                    yhi = uc.set_in_units(float(terms[1]), box_unit)
                    if len(terms) == 3:
                        xz = uc.set_in_units(float(terms[2]), box_unit)
                    bcount += 1

                #read z boundary condition values if time to do so
                elif bcount == 2:
                    zlo = uc.set_in_units(float(terms[0]), box_unit)
                    zhi = uc.set_in_units(float(terms[1]), box_unit)
                    if len(terms) == 3:
                        yz = uc.set_in_units(float(terms[2]), box_unit)
                        # convert LAMMPS' tilted bounding-box extents back
                        # to true box lo/hi values before building the Box
                        xlo = xlo - min((0.0, xy, xz, xy + xz))
                        xhi = xhi - max((0.0, xy, xz, xy + xz))
                        ylo = ylo - min((0.0, yz))
                        yhi = yhi - max((0.0, yz))
                        box = am.Box(xlo=xlo, xhi=xhi,
                                     ylo=ylo, yhi=yhi,
                                     zlo=zlo, zhi=zhi,
                                     xy=xy, xz=xz, yz=yz)
                    else:
                        box = am.Box(xlo=xlo, xhi=xhi,
                                     ylo=ylo, yhi=yhi,
                                     zlo=zlo, zhi=zhi)
                    bcount += 1

                #if not time to read value, check the ITEM: header information
                else:
                    #only consider ITEM: lines
                    if terms[0] == 'ITEM:':

                        #ITEM: TIMESTEP indicates it is time to read the timestep
                        if terms[1] == 'TIMESTEP':
                            readtimestep = True

                        #ITEM: NUMBER indicates it is time to read natoms
                        elif terms[1] == 'NUMBER':
                            readnatoms = True

                        #ITEM: BOX gives pbc and indicates it is time to read box parameters
                        # the last three terms are the boundary flags; 'pp' = periodic
                        elif terms[1] == 'BOX':
                            pbc = [True, True, True]
                            for i in xrange(3):
                                if terms[i + len(terms) - 3] != 'pp':
                                    pbc[i] = False
                            bcount = 0

                        #ITEM: ATOMS gives list of per-Atom property names and indicates it is time to read atomic values
                        elif terms[1] == 'ATOMS':
                            assert box is not None, 'Box information not found'
                            assert natoms is not None, 'Number of atoms not found'

                            #read list of property names
                            name_list = terms[2:]
                            id_index = name_list.index('id')

                            #create empty array for reading property values
                            prop_vals = np.empty((natoms, len(name_list)))

                            #create and save default prop_info Data Model if needed
                            # the len(data) < 80 guard skips saving when 'data' is
                            # file content rather than a short file name
                            if prop_info is None:
                                prop_info = __prop_info_default_load(name_list)
                                if isinstance(data, (str, unicode)) and len(data) < 80:
                                    with open(data + '.json', 'w') as fj:
                                        prop_info.json(fp=fj, indent=4)
                                prop_info = prop_info.find('LAMMPS-dump-atoms_prop-relate')

                            #create system and flag that it is time to read data
                            system = am.System(atoms=am.Atoms(natoms=natoms), box=box, pbc=pbc)
                            system.prop['timestep'] = timestep
                            readatoms = True

    if system is None:
        raise ValueError('Failed to properly load dump file ' + str(data)[:50])

    return system
def parse(inscript, singularkeys=None, allsingular=False):
    """
    Parses an input file and returns a dictionary of parameter terms.

    These are the parsing rules:

    - The first word in a line is taken as the key name of the parameter.
    - All other words are joined together into a single string value for
      the parameter.
    - Words that start with # indicate comments with that word and all
      words to the right of it in the same line being ignored.
    - Any lines with less than two non-comment terms are ignored.  In
      other words, blank lines and lines with keys but not values are
      skipped over.
    - Multiple values can be assigned to the same term by repeating the
      key name on a different line.
    - The keyword arguments can be used to issue an error if multiple
      values are trying to be assigned to terms that should only have a
      single values.

    Parameters
    ----------
    inscript : string or file-like-object
        The file, path to file, or contents of the input script to parse.
    singularkeys : list of str, optional
        List of term keys that should not have multiple values.
    allsingular : bool, optional
        Indicates if all term keys should be singular (Default is False).

    Returns
    -------
    params : dict
        Dictionary of parsed input key-value pairs

    Raises
    ------
    ValueError
        If both singularkeys and allsingular are given, or if multiple
        values found for a singular key.
    """
    # Argument check.  Default is None rather than a mutable [] so no
    # list object is shared across calls.
    if singularkeys is None:
        singularkeys = []
    singularkeys = aslist(singularkeys)
    if allsingular and len(singularkeys) > 0:
        raise ValueError('allsingular and singularkeys options cannot both be given')

    params = {}

    # Open inscript
    with uber_open_rmode(inscript) as infile:

        # Iterate over all lines in infile
        for line in infile:
            # Lines may arrive as bytes (file opened by uber_open_rmode) or
            # already-decoded str; only the former needs decoding.  Narrowed
            # from a bare except to keep best-effort behavior without
            # hiding unrelated errors.
            try:
                line = line.decode('utf-8')
            except (AttributeError, UnicodeDecodeError):
                pass

            terms = line.split()

            # Remove comments: truncate terms and line at the first word
            # starting with '#'
            i = 0
            index = len(line)
            while i < len(terms):
                if len(terms[i]) > 0 and terms[i][0] == '#':
                    index = line.index(terms[i])
                    break
                i += 1
            terms = terms[:i]
            line = line[:index]

            # Skip empty, comment, and valueless lines
            if len(terms) > 1:

                # Split into key and value
                key = terms[0]
                value = line.replace(key, '', 1).strip()

                # First time key is called save as is
                if key not in params:
                    params[key] = value

                # Append value to key if not singular
                elif not allsingular and key not in singularkeys:
                    # Append value if parameter is already a list
                    if isinstance(params[key], list):
                        params[key].append(value)
                    # Convert parameter to list if needed and then append value
                    else:
                        params[key] = [params[key]]
                        params[key].append(value)

                # Issue error for trying to append to a singular value
                else:
                    raise ValueError('multiple values found for singular input parameter ' + key)

    return params