def __init__(self, args_str=None, **init_kwargs):
    """
    Initialise the Descriptor wrapper and cache dimensions/permutations.

    Builds the final argument string from ``args_str`` plus any keyword
    arguments, then constructs the wrapped Fortran descriptor object.

    properties:
    - cutoff

    calculateable:
    - sizes: `n_desc, n_cross, n_index = desc.sizes(_quip_atoms)`
    """
    kwargs_str = key_val_dict_to_str(init_kwargs)
    args_str = kwargs_str if args_str is None else args_str + ' ' + kwargs_str

    # construct the wrapped Fortran object, kept private from the user
    self._quip_descriptor = quippy.descriptors_module.descriptor(args_str)

    # cached here for compatibility with the older interface
    # (convoluted; could be rethought at some point)
    self.n_dim = self.dimensions()
    self.n_perm = self.get_n_perm()
def calc(self, at, grad=False, args_str=None, cutoff=None, **calc_kwargs):
    """
    Calculates all descriptors of this type in the Atoms object, and
    gradients if grad=True.

    Results can be accessed dictionary- or attribute-style; 'descriptor'
    contains descriptor values, 'descriptor_index_0based' contains the
    0-based indices of the central atom(s) in each descriptor, 'grad'
    contains gradients, 'grad_index_0based' contains indices to gradients
    (descriptor, atom). Cutoffs and gradients of cutoffs are also returned.
    """
    # merge any keyword calc arguments into the argument string
    extra_args = key_val_dict_to_str(calc_kwargs)
    args_str = extra_args if args_str is None else args_str + ' ' + extra_args

    # recompute connectivity on the atoms object with the internal one
    self._calc_connect(at, cutoff)

    # run the wrapped Fortran descriptor calculation
    raw_out = self._quip_descriptor.calc(
        at, do_descriptor=True, do_grad_descriptor=grad, args_str=args_str)

    # gather the per-descriptor dicts into lists keyed by field name
    count = self.count(at)
    descriptor_out = dict()
    for i in range(count):
        mono_dict = quippy.convert.descriptor_data_mono_to_dict(raw_out.x[i])
        for key, val in mono_dict.items():
            descriptor_out.setdefault(key, []).append(val)

    # stack each collected list into a single numpy array
    for key in descriptor_out:
        descriptor_out[key] = np.array(descriptor_out[key])

    # flatten the gradient array into the layout the old interface used
    if 'grad_data' in descriptor_out:
        raw_grad = descriptor_out['grad_data']
        descriptor_out['grad_data'] = raw_grad.transpose(0, 3, 2, 1).reshape(
            -1, raw_grad.shape[2], raw_grad.shape[1])

    # This is a dictionary now and hence needs to be indexed as one,
    # unlike the old version
    return descriptor_out
def __init__(self, args_str="", pot1=None, pot2=None, param_str=None,
             param_filename=None, atoms=None, calculation_always_required=False,
             calc_args=None, add_arrays=None, add_info=None, **kwargs):
    """
    Initialise the ASE-compatible calculator wrapping a QUIP potential.

    The underlying Fortran potential is built from, in order of priority:
    a parameter file (``param_filename``), a sum of two existing
    potentials (``pot1`` + ``pot2``), or a parameter string
    (``param_str``).
    """
    quippy.potential_module.Potential.__init__.__doc__
    self._default_properties = ['energy', 'forces']
    self.calculation_always_required = calculation_always_required

    ase.calculators.calculator.Calculator.__init__(
        self, restart=None, ignore_bad_restart_file=False, label=None,
        atoms=atoms, **kwargs)

    # construct the wrapped QUIP potential
    if param_filename is not None and isinstance(param_filename, str):
        # from a param filename
        self._quip_potential = quippy.potential_module.Potential.filename_initialise(
            args_str=args_str, param_filename=param_filename)
    elif pot1 is not None and pot2 is not None:
        # from the sum of two potentials; unwrap quippy-level wrappers
        # noinspection PyProtectedMember
        def _unwrap(pot):
            # reach for the raw Fortran object when given a wrapper
            if isinstance(pot, quippy.potential.Potential):
                return pot._quip_potential
            return pot

        self._quip_potential = quippy.potential_module.Potential(
            args_str=args_str, pot1=_unwrap(pot1), pot2=_unwrap(pot2))
    else:
        # from a param string
        self._quip_potential = quippy.potential_module.Potential(
            args_str=args_str, param_str=param_str)

    # placeholder for the QUIP atoms object, filled in at calculate() time
    self._quip_atoms = None

    # info/array keys to add when converting atoms objects
    self.add_arrays = add_arrays
    self.add_info = add_info

    # kept from the old implementation
    if atoms is not None:
        atoms.set_calculator(self)
    self.name = args_str

    # normalise calc_args to a plain string
    if isinstance(calc_args, dict):
        calc_args = key_val_dict_to_str(calc_args)
    elif calc_args is None:
        calc_args = ""
    self.calc_args = calc_args
def __init__(self, args_str, param_str=None, atoms=None,
             calculation_always_required=False, param_filename=None,
             calc_args=None, **kwargs):
    """
    Initialise the ASE-compatible calculator wrapping a QUIP potential.

    Parameters
    ----------
    args_str : str
        Initialisation string passed to the Fortran Potential; also used
        as this calculator's ``name``.
    param_str : str, optional
        XML parameter string for the potential.
    atoms : ase.Atoms, optional
        If given, this calculator is attached to it.
    calculation_always_required : bool
        Stored for use by calculate(); if True the calculation is redone
        on every call.
    param_filename : str, optional
        Read the XML parameters from this file instead of ``param_str``.
    calc_args : str or dict, optional
        Default calculation arguments used on every calc() call;
        a dict is converted to a key=val string.
    """
    quippy.potential_module.Potential.__init__.__doc__
    self._default_properties = ['energy', 'forces']
    self.calculation_always_required = calculation_always_required

    ase.calculators.calculator.Calculator.__init__(
        self, restart=None, ignore_bad_restart_file=False, label=None,
        atoms=atoms, **kwargs)

    # init the quip potential
    # fix: isinstance() instead of `type(...) == str`, which rejected str
    # subclasses; also matches the check used by the other overload of
    # this constructor in the file
    if param_filename is not None and isinstance(param_filename, str):
        # from a param filename
        self._quip_potential = quippy.potential_module.Potential.filename_initialise(
            args_str=args_str, param_filename=param_filename)
    else:
        # from a param string
        self._quip_potential = quippy.potential_module.Potential(
            args_str=args_str, param_str=param_str)

    # init the quip atoms as None, to have the variable
    self._quip_atoms = None

    # from old
    if atoms is not None:
        atoms.set_calculator(self)
    self.name = args_str

    # normalise calc_args to a plain string
    if isinstance(calc_args, dict):
        calc_args = key_val_dict_to_str(calc_args)
    elif calc_args is None:
        calc_args = ""
    self.calc_args = calc_args
def test_complex_key_val():
    """Round-trip test for extxyz key=value comment-line parsing.

    Builds one comment line exercising quoting, escaping, bare booleans,
    arrays in three bracket styles, scientific floats, the special 3x3
    'virial'/'Lattice' handling, and _JSON-tagged values; checks parsing
    against an expected dict, re-serialisation + re-parsing, and a full
    file round trip through ase.io.read.
    """
    complex_xyz_string = (
        ' '  # start with a separator
        'str=astring '
        'quot="quoted value" '
        'quote_special="a_to_Z_$%%^&*" '
        r'escaped_quote="esc\"aped" '
        'true_value '
        'false_value = F '
        'integer=22 '
        'floating=1.1 '
        'int_array={1 2 3} '
        'float_array="3.3 4.4" '
        'virial="1 4 7 2 5 8 3 6 9" '  # special 3x3, fortran ordering
        'not_a_3x3_array="1 4 7 2 5 8 3 6 9" '  # should be left as a 9-vector
        'Lattice=" 4.3 0.0 0.0 0.0 3.3 0.0 0.0 0.0 7.0 " '  # spaces in arr
        'scientific_float=1.2e7 '
        'scientific_float_2=5e-6 '
        'scientific_float_array="1.2 2.2e3 4e1 3.3e-1 2e-2" '
        'not_array="1.2 3.4 text" '
        'bool_array={T F T F} '
        'bool_array_2=" T, F, T " '  # leading spaces
        'not_bool_array=[T F S] '
        # read and write
        # '\xfcnicode_key=val\xfce '  # fails on AppVeyor
        'unquoted_special_value=a_to_Z_$%%^&* '
        '2body=33.3 '
        'hyphen-ated '  # parse only
        'many_other_quotes="4 8 12" '
        'comma_separated="7, 4, -1" '
        'bool_array_commas=[T, T, F, T] '
        'Properties=species:S:1:pos:R:3 '
        'multiple_separators '
        'double_equals=abc=xyz '
        'trailing '
        '"with space"="a value" '
        r'space\"="a value" '
        # tests of JSON functionality
        'f_str_looks_like_array="[[1, 2, 3], [4, 5, 6]]" '
        'f_float_array="_JSON [[1.5, 2, 3], [4, 5, 6]]" '
        'f_int_array="_JSON [[1, 2], [3, 4]]" '
        'f_bool_bare '
        'f_bool_value=F '
        'f_irregular_shape="_JSON [[1, 2, 3], [4, 5]]" '
        'f_dict={_JSON {"a" : 1}} '
    )

    # the dict the parser is expected to produce for the line above
    expected_dict = {
        'str': 'astring',
        'quot': "quoted value",
        'quote_special': u"a_to_Z_$%%^&*",
        'escaped_quote': 'esc"aped',
        'true_value': True,
        'false_value': False,
        'integer': 22,
        'floating': 1.1,
        'int_array': np.array([1, 2, 3]),
        'float_array': np.array([3.3, 4.4]),
        # note: 'virial' is transposed relative to the flat input
        # (fortran ordering), unlike 'not_a_3x3_array'
        'virial': np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
        'not_a_3x3_array': np.array([1, 4, 7, 2, 5, 8, 3, 6, 9]),
        'Lattice': np.array([[4.3, 0.0, 0.0],
                             [0.0, 3.3, 0.0],
                             [0.0, 0.0, 7.0]]),
        'scientific_float': 1.2e7,
        'scientific_float_2': 5e-6,
        'scientific_float_array': np.array([1.2, 2200, 40, 0.33, 0.02]),
        'not_array': "1.2 3.4 text",
        'bool_array': np.array([True, False, True, False]),
        'bool_array_2': np.array([True, False, True]),
        'not_bool_array': 'T F S',
        # '\xfcnicode_key': 'val\xfce',  # fails on AppVeyor
        'unquoted_special_value': 'a_to_Z_$%%^&*',
        '2body': 33.3,
        'hyphen-ated': True,
        'many_other_quotes': np.array([4, 8, 12]),
        'comma_separated': np.array([7, 4, -1]),
        'bool_array_commas': np.array([True, True, False, True]),
        'Properties': 'species:S:1:pos:R:3',
        'multiple_separators': True,
        'double_equals': 'abc=xyz',
        'trailing': True,
        'with space': 'a value',
        'space"': 'a value',
        'f_str_looks_like_array': '[[1, 2, 3], [4, 5, 6]]',
        'f_float_array': np.array([[1.5, 2, 3], [4, 5, 6]]),
        'f_int_array': np.array([[1, 2], [3, 4]]),
        'f_bool_bare': True,
        'f_bool_value': False,
        'f_irregular_shape': np.array([[1, 2, 3], [4, 5]], object),
        'f_dict': {"a": 1}
    }

    # direct parse of the comment line
    parsed_dict = extxyz.key_val_str_to_dict(complex_xyz_string)
    np.testing.assert_equal(parsed_dict, expected_dict)

    # serialise the expected dict and parse it back (round trip)
    key_val_str = extxyz.key_val_dict_to_str(expected_dict)
    parsed_dict = extxyz.key_val_str_to_dict(key_val_str)
    np.testing.assert_equal(parsed_dict, expected_dict)

    # Round trip through a file with complex line.
    # Create file with the complex line and re-read it afterwards.
    with open('complex.xyz', 'w', encoding='utf-8') as f_out:
        f_out.write('1\n{}\nH 1.0 1.0 1.0'.format(complex_xyz_string))
    complex_atoms = ase.io.read('complex.xyz')

    # test all keys end up in info, as expected
    for key, value in expected_dict.items():
        if key in ['Properties', 'Lattice']:
            continue  # goes elsewhere
        else:
            np.testing.assert_equal(complex_atoms.info[key], value)
def calculate(self, atoms=None, properties=None, system_changes=None,
              forces=None, virial=None, local_energy=None, local_virial=None,
              vol_per_atom=None, copy_all_results=True, calc_args=None,
              add_arrays=None, add_info=None, **kwargs):
    """
    Perform the QUIP calculation and fill ``self.results``.

    Parameters
    ----------
    atoms : ase.Atoms, optional
        Atoms to calculate on; falls back to ``self.atoms`` via the base
        Calculator.calculate() call.
    properties : list of str, optional
        Properties to compute; merged with the default properties.
    system_changes : list, optional
        Passed through to the base ASE Calculator.
    forces, virial, local_energy, local_virial : array-like, optional
        Optional pre-allocated output arrays; handled via _check_arg()
        ('y' requests the property, 'add' additionally passes the array
        through to the Fortran calc call).
    vol_per_atom : str or float, optional
        Per-atom volume used for 'stresses': a key into
        ``self.atoms.arrays``, or a number; defaults to the average
        cell volume per atom.
    copy_all_results : bool
        If True, copy results into the info/arrays of the involved Atoms
        objects as well.
    calc_args : str or dict, optional
        Extra calc arguments appended to ``self.calc_args``.
    add_arrays, add_info : optional
        If not None, OVERRIDE the values set in __init__ when converting
        the ASE atoms to a QUIP atoms object.
    """
    # handling the property inputs
    if properties is None:
        properties = self.get_default_properties()
    else:
        properties = list(set(self.get_default_properties() + properties))
    if len(properties) == 0:
        raise RuntimeError('Nothing to calculate')
    for prop in properties:
        if prop not in self.implemented_properties:
            raise RuntimeError(
                "Don't know how to calculate property '%s'" % prop)

    # initialise dictionary to arguments to be passed to calculator
    _dict_args = {}
    val = _check_arg(forces)
    if val == 'y':
        properties += ['force']
    elif val == 'add':
        properties += ['force']
        _dict_args['force'] = forces
    val = _check_arg(virial)
    if val == 'y':
        properties += ['virial']
    elif val == 'add':
        properties += ['virial']
        _dict_args['virial'] = virial
    val = _check_arg(local_energy)
    if val == 'y':
        properties += ['local_energy']
    elif val == 'add':
        properties += ['local_energy']
        _dict_args['local_energy'] = local_energy
    val = _check_arg(local_virial)
    if val == 'y':
        properties += ['local_virial']
    elif val == 'add':
        properties += ['local_virial']
        _dict_args['local_virial'] = local_virial

    # needed dry run of the ase calculator
    ase.calculators.calculator.Calculator.calculate(
        self, atoms, properties, system_changes)

    if not self.calculation_always_required and not self.calculation_required(
            self.atoms, properties):
        return

    # construct the quip atoms object which we will use to calculate on
    # if add_arrays/add_info given to this object is not None, then OVERWRITES the value set in __init__
    self._quip_atoms = quippy.convert.ase_to_quip(
        self.atoms,
        add_arrays=add_arrays if add_arrays is not None else self.add_arrays,
        add_info=add_info if add_info is not None else self.add_info)

    # constructing args_string with automatically aliasing the calculateable non-quippy properties
    # calc_args string to be passed to Fortran code
    args_str = self.calc_args
    if calc_args is not None:
        if isinstance(calc_args, dict):
            calc_args = key_val_dict_to_str(calc_args)
        args_str += ' ' + calc_args
    # fix: was `if kwargs is not None:`, which is always true for **kwargs
    if kwargs:
        args_str += ' ' + key_val_dict_to_str(kwargs)

    args_str += ' energy'
    # no need to add logic to energy, it is calculated anyways (returned when potential called)
    if 'virial' in properties or 'stress' in properties:
        args_str += ' virial'
    if 'local_virial' in properties or 'stresses' in properties:
        args_str += ' local_virial'
    if 'energies' in properties or 'local_energy' in properties:
        args_str += ' local_energy'
    if 'forces' in properties:
        args_str += ' force'
    # TODO: implement 'elastic_constants', 'unrelaxed_elastic_constants', 'numeric_forces'

    # fixme: workaround to get the calculated energy, because the wrapped dictionary is not handling that float well
    ener_dummy = np.zeros(1, dtype=float)

    # the calculation itself
    # print('Calling QUIP Potential.calc() with args_str "{}"'.format(args_str))
    self._quip_potential.calc(self._quip_atoms, args_str=args_str,
                              energy=ener_dummy, **_dict_args)

    # retrieve data from _quip_atoms.properties and _quip_atoms.params
    _quip_properties = quippy.convert.get_dict_arrays(
        self._quip_atoms.properties)
    _quip_params = quippy.convert.get_dict_arrays(self._quip_atoms.params)

    self.results['energy'] = ener_dummy[0]
    self.results['free_energy'] = self.results['energy']

    # process potential output to ase.properties
    # not handling energy here, because that is always returned by the potential above
    if 'virial' in _quip_params.keys():
        stress = -_quip_params['virial'].copy() / self.atoms.get_volume()
        # convert to 6-element array in Voigt order
        self.results['stress'] = np.array([
            stress[0, 0], stress[1, 1], stress[2, 2],
            stress[1, 2], stress[0, 2], stress[0, 1]
        ])
        self.results['virial'] = _quip_params['virial'].copy()

    if 'force' in _quip_properties.keys():
        self.results['forces'] = np.copy(_quip_properties['force'].T)

    if 'local_energy' in _quip_properties.keys():
        self.results['energies'] = np.copy(
            _quip_properties['local_energy'].T)

    if 'local_virial' in _quip_properties.keys():
        self.results['local_virial'] = np.copy(
            _quip_properties['local_virial'])

    if 'stresses' in properties:
        # use the correct atomic volume
        if vol_per_atom is not None:
            if vol_per_atom in self.atoms.arrays.keys():
                # case of reference to a column in atoms.arrays
                _v_atom = self.atoms.arrays[vol_per_atom]
            else:
                # try for case of a given volume
                try:
                    _v_atom = float(vol_per_atom)
                except ValueError:
                    # cannot convert to float, so wrong
                    raise ValueError(
                        'volume_per_atom: not found in atoms.arrays.keys() and cannot utilise value '
                        'as given atomic volume')
        else:
            # just use average
            _v_atom = self.atoms.get_volume() / self._quip_atoms.n
        self.results['stresses'] = -np.copy(
            _quip_properties['local_virial']).T.reshape(
                (self._quip_atoms.n, 3, 3), order='F') / _v_atom

    if isinstance(copy_all_results, bool) and copy_all_results:
        if atoms is not None:
            _at_list = [self.atoms, atoms]
        else:
            # fix: was `list(self.atoms)`, which iterates the Atoms object
            # into individual Atom objects that do not support the
            # .info/.arrays writes below; we want a one-element list
            _at_list = [self.atoms]

        for at in _at_list:
            # keys already handled above, or internal to the QUIP object
            _skip_keys = set(
                list(self.results.keys()) + [
                    'Z', 'pos', 'species', 'map_shift', 'n_neighb',
                    'force', 'local_energy', 'local_virial', 'velo'
                ])

            # default params arguments
            at.info['energy'] = self.results['energy']
            if 'stress' in self.results.keys():
                at.info['stress'] = self.results['stress'].copy()

            # default array arguments
            for key in ('forces', 'energies', 'stresses'):
                if key in self.results.keys():
                    at.arrays[key] = self.results[key].copy()

            # any other params
            for param, val in _quip_params.items():
                if param not in _skip_keys:
                    at.info[param] = cp(val)

            # any other arrays
            for prop, val in _quip_properties.items():
                if prop not in _skip_keys:
                    # transpose before copying because of setting `order=C` here; issue#151
                    at.arrays[prop] = np.copy(val.T, order='C')
def calc(self, at, grad=False, args_str=None, cutoff=None, **calc_kwargs): """ Calculates all descriptors of this type in the Atoms object, and gradients if grad=True. Results can be accessed dictionary- or attribute-style; 'descriptor' contains descriptor values, 'descriptor_index_0based' contains the 0-based indices of the central atom(s) in each descriptor, 'grad' contains gradients, 'grad_index_0based' contains indices to gradients (descriptor, atom). Cutoffs and gradients of cutoffs are also returned. """ # arg string and calc_args if args_str is None: args_str = key_val_dict_to_str(calc_kwargs) else: # new, for compatibility: merged if both given args_str += ' ' + key_val_dict_to_str(calc_kwargs) # calc connectivity on the atoms object with the internal one self._calc_connect(at, cutoff) # descriptor calculation descriptor_out_raw = self._quip_descriptor.calc( at, do_descriptor=True, do_grad_descriptor=grad, args_str=args_str) # unpack to a list of dicts count = self.count(at) descriptor_out = dict() for i in range(count): # unpack to dict with the specific converter function mono_dict = quippy.convert.descriptor_data_mono_to_dict( descriptor_out_raw.x[i]) # add to the result for key, val in mono_dict.items(): if key in descriptor_out.keys(): descriptor_out[key].append(val) else: descriptor_out[key] = [val] # make numpy arrays out of them for key, val in descriptor_out.items(): # merge the arrays according to shape if key in ['data', 'ci']: descriptor_out[key] = np.concatenate(val, axis=0) elif key in ['covariance_cutoff', 'has_data']: descriptor_out[key] = np.array(val) elif key in ["pos", "grad_covariance_cutoff"]: # corresponds to the gradients descriptor_out[key] = np.concatenate([x.T for x in val]) elif key == "grad_data": descriptor_out[key] = np.transpose(np.concatenate(val, axis=2), axes=(2, 1, 0)) if "ii" in descriptor_out.keys(): grad_index_0based = [] for idx, ii_perdesc in enumerate(descriptor_out["ii"]): for ii_item in ii_perdesc: 
grad_index_0based.append( [descriptor_out["ci"][idx], ii_item]) # same as in py2, makes iteration of gradient easier descriptor_out["grad_index_0based"] = np.array( grad_index_0based) - 1 if count > 0: descriptor_out['data'] = descriptor_out['data'].reshape( (count, -1)) else: descriptor_out['data'] = np.array([[]]) # This is a dictionary now and hence needs to be indexed as one, unlike the old version return descriptor_out
'unquoted_special_value': u'a_to_Z_$%%^&*\xfc\u2615', '2body': 33.3, 'hyphen-ated': True, 'many_other_quotes': np.array([4, 8, 12]), 'comma_separated': np.array([7, 4, -1]), 'bool_array_commas': np.array([True, True, False, True]), 'Properties': 'species:S:1:pos:R:3', 'multiple_separators': True, 'double_equals': 'abc=xyz', 'trailing': True } parsed_dict = extxyz.key_val_str_to_dict(complex_xyz_string) np.testing.assert_equal(parsed_dict, expected_dict) key_val_str = extxyz.key_val_dict_to_str(expected_dict) parsed_dict = extxyz.key_val_str_to_dict(key_val_str) np.testing.assert_equal(parsed_dict, expected_dict) # Round trip through a file with complex line. # Create file with the complex line and re-read it afterwards. # Test is disabled as it requires that file io defaults to utf-8 encoding # which is not guaranteed on Python 2 and varies with LC_ variables # on linux. Test can be enabled if ase ever strongly enforces utf-8 # everywhere. if False: with open('complex.xyz', 'w', encoding='utf-8') as f_out: f_out.write('1\n{}\nH 1.0 1.0 1.0'.format(complex_xyz_string)) complex_atoms = ase.io.read('complex.xyz') # test all keys end up in info, as expected
def calc(self, at, grad=False, args_str=None, cutoff=None, **calc_kwargs):
    """
    Calculates all descriptors of this type in the Atoms object, and
    gradients if grad=True.

    Results can be accessed dictionary- or attribute-style; 'descriptor'
    contains descriptor values, 'descriptor_index_0based' contains the
    0-based indices of the central atom(s) in each descriptor, 'grad'
    contains gradients, 'grad_index_0based' contains indices to gradients
    (descriptor, atom). Cutoffs and gradients of cutoffs are also
    returned.
    """
    # arg string and calc_args
    if args_str is None:
        args_str = key_val_dict_to_str(calc_kwargs)
    else:
        # new, for compatibility: merged if both given
        args_str += ' ' + key_val_dict_to_str(calc_kwargs)

    # calc connectivity on the atoms object with the internal one
    self._calc_connect(at, cutoff)

    # descriptor calculation
    descriptor_out_raw = self._quip_descriptor.calc(
        at, do_descriptor=True, do_grad_descriptor=grad, args_str=args_str)

    # unpack to a list of dicts
    count = self.count(at)
    descriptor_out = dict()
    for i in range(count):
        # unpack to dict with the specific converter function
        mono_dict = quippy.convert.descriptor_data_mono_to_dict(
            descriptor_out_raw.x[i])
        # add to the result
        for key, val in mono_dict.items():
            if key in descriptor_out.keys():
                descriptor_out[key].append(val)
            else:
                descriptor_out[key] = [val]

    # make numpy arrays out of them
    for key, val in descriptor_out.items():
        # merge the arrays according to shape
        if key in ['has_grad_data', 'ii', 'data', 'ci']:
            axis = 0
        elif key in ['pos', 'grad_covariance_cutoff']:
            axis = 1
        elif key in ['covariance_cutoff', 'has_data']:
            descriptor_out[key] = np.array(val)
            continue
        elif key in ['grad_data']:
            axis = 2
        else:
            # this is in case any yet unresolved output shows up
            # fixme: should this raise an exception instead
            axis = 0
        descriptor_out[key] = np.concatenate(val, axis=axis)

    # fix: guard the reshape — when count == 0 the loop above never runs,
    # so 'data' is absent and the unconditional reshape raised KeyError;
    # return an empty 2D array instead (matches the behaviour of the
    # other calc() implementation in this file)
    if count > 0:
        descriptor_out['data'] = descriptor_out['data'].reshape(
            (count, -1))
    else:
        descriptor_out['data'] = np.array([[]])

    # yield C-contiguous shaped arrays
    if 'grad_covariance_cutoff' in descriptor_out.keys():
        descriptor_out['grad_covariance_cutoff'] = descriptor_out[
            'grad_covariance_cutoff'].transpose()
    if 'pos' in descriptor_out.keys():
        descriptor_out['pos'] = descriptor_out['pos'].transpose()
    if 'grad_data' in descriptor_out.keys():
        # renamed local (was `grad`) so it no longer shadows the
        # boolean `grad` parameter
        grad_arr = descriptor_out['grad_data']
        descriptor_out['grad_data'] = grad_arr.transpose(2, 1, 0)

    # This is a dictionary now and hence needs to be indexed as one, unlike the old version
    return descriptor_out