Example #1
 def do_eq_setrv(self, tasks, logger_level=3):
     '''
         Set the rest values to their respective AI equilibrium values.
     '''
     with log.section('EQSET', 2, timer='Equil Set RV'):
         self.reset_system()
         log.dump('Setting rest values to AI equilibrium values for tasks %s' %' '.join(tasks))
         for term in self.valence.terms:
             vterm = self.valence.vlist.vtab[term.index]
             if np.array([task in term.tasks for task in tasks]).any():
                 if term.kind==3:#cross term
                     ic0 = self.valence.iclist.ictab[vterm['ic0']]
                     ic1 = self.valence.iclist.ictab[vterm['ic1']]
                     self.valence.set_params(term.index, rv0=ic0['value'], rv1=ic1['value'])
                 elif term.kind==4 and term.ics[0].kind==4:#Cosine of DihedAngle
                     ic = self.valence.iclist.ictab[vterm['ic0']]
                     m = self.valence.get_params(term.index, only='m')
                     rv = ic['value']%(360.0*deg/m)
                     with log.section('EQSET', 4, timer='Equil Set RV'):
                         log.dump('Set rest value of %s(%s) (eq=%.3f deg) to %.3f deg' %(
                             term.basename,
                             '.'.join([str(at) for at in term.get_atoms()]),
                             ic['value']/deg, rv/deg
                         ))
                     self.valence.set_params(term.index, rv0=rv)
                 else:
                     rv = self.valence.iclist.ictab[vterm['ic0']]['value']
                     self.valence.set_params(term.index, rv0=rv)
         self.valence.dump_logger(print_level=logger_level)
         self.average_pars()
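All of the snippets on this page use QuickFF's module-level `log` object as a context manager. Its real implementation is not shown here, so the following is a minimal, hypothetical stand-in (an assumption, not QuickFF code) that mimics the `section`, `dump` and `warning` calls used throughout these examples:

import time
from contextlib import contextmanager

class MiniLog(object):
    '''Hypothetical stand-in mimicking the log interface used in these examples.'''
    def __init__(self, log_level=3):
        self.log_level = log_level
        self._stack = []

    @contextmanager
    def section(self, key, print_level, timer=None):
        #messages dumped inside the section are prefixed with its key
        self._stack.append((key, print_level))
        start = time.time()
        try:
            yield
        finally:
            self._stack.pop()
            if timer is not None:
                print('[%s] timer %s: %.3f s' % (key, timer, time.time() - start))

    def dump(self, msg):
        #only print when the current section is verbose enough
        if self._stack and self._stack[-1][1] <= self.log_level:
            print('[%s] %s' % (self._stack[-1][0], msg))

    def warning(self, msg):
        self.dump('WARNING: ' + msg)

log = MiniLog()
with log.section('EQSET', 2, timer='Equil Set RV'):
    log.dump('Setting rest values to AI equilibrium values')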
Example #2
 def plot_trajectories(self, do_valence=False):
     '''
         Plot energy contributions along perturbation trajectories and dump
         perturbation trajectories to XYZ files.
     '''
     only = self.kwargs.get('only_traj', 'PT_ALL')
     if not isinstance(only, list): only = [only]
     with log.section('PLOT', 3, timer='PT plot energy'):
         if self.kwargs.get('plot_traj', False):
             ffrefs = self.kwargs.get('ffrefs', [])
             valence = None
             if do_valence: valence = self.valence
             for trajectory in self.trajectories:
                 if trajectory is None: continue
                 for pattern in only:
                     if pattern == 'PT_ALL' or pattern in trajectory.term.basename:
                         trajectory.plot(self.ai,
                                         ffrefs=ffrefs,
                                         valence=valence)
     with log.section('XYZ', 3, timer='PT dump XYZ'):
         if self.kwargs.get('xyz_traj', False):
             for trajectory in self.trajectories:
                 if trajectory is None: continue
                 for pattern in only:
                     if pattern == 'PT_ALL' or pattern in trajectory.term.basename:
                         trajectory.to_xyz()
Example #3
 def do_eq_setrv(self, tasks, logger_level=3):
     '''
         Set the rest values to their respective AI equilibrium values.
     '''
     with log.section('EQSET', 2, timer='Equil Set RV'):
         self.reset_system()
         log.dump(
             'Setting rest values to AI equilibrium values for tasks %s' %
             ' '.join(tasks))
         for term in self.valence.terms:
             vterm = self.valence.vlist.vtab[term.index]
             if np.array([task in term.tasks for task in tasks]).any():
                 if term.kind == 3:  #cross term
                     ic0 = self.valence.iclist.ictab[vterm['ic0']]
                     ic1 = self.valence.iclist.ictab[vterm['ic1']]
                     self.valence.set_params(term.index,
                                             rv0=ic0['value'],
                                             rv1=ic1['value'])
                 elif term.kind == 4 and term.ics[
                         0].kind == 4:  #Cosine of DihedAngle
                     ic = self.valence.iclist.ictab[vterm['ic0']]
                     m = self.valence.get_params(term.index, only='m')
                     rv = ic['value'] % (360.0 * deg / m)
                     with log.section('EQSET', 4, timer='Equil Set RV'):
                         log.dump(
                             'Set rest value of %s(%s) (eq=%.3f deg) to %.3f deg'
                             % (term.basename, '.'.join([
                                 str(at) for at in term.get_atoms()
                             ]), ic['value'] / deg, rv / deg))
                     self.valence.set_params(term.index, rv0=rv)
                 else:
                     rv = self.valence.iclist.ictab[vterm['ic0']]['value']
                     self.valence.set_params(term.index, rv0=rv)
         self.valence.dump_logger(print_level=logger_level)
         self.average_pars()
Example #4
 def modify_term(self, term_index, pot, ics, basename, tasks, units):
     '''
         Modify the term with given index to a new valence term.
     '''
     #TODO: only allow masters and automatically also modify the slaves
     with log.section('VAL', 2):
         #modify in valence.terms
         old_term = self.terms[term_index]
         new_term = Term(term_index,
                         basename,
                         pot.kind,
                         ics,
                         tasks,
                         units,
                         master=old_term.master,
                         slaves=old_term.slaves)
         self.terms[term_index] = new_term
         #modify in valence.vlist.vtab
         vterm = self.vlist.vtab[term_index]
         if pot.kind == 1:  #all 4 parameters of PolyFour are given as 1 tuple
             args = [(None, ) * len(units)] + ics
         else:
             args = [
                 None,
             ] * len(units) + ics
         new = pot(*args)
         vterm['kind'] = new.kind
         for i in xrange(len(new.pars)):
             vterm['par%i' % i] = new.pars[i]
         ic_indexes = new.get_ic_indexes(self.iclist)
         for i in xrange(len(ic_indexes)):
             vterm['ic%i' % i] = ic_indexes[i]
Example #5
    def __init__(self, system, valence):
        '''
            **Arguments**

            system
                an instance of the Yaff System class defining the system

            valence
                an instance of ValenceFF defining the valence force field
        '''
        self.system = system
        self.valence = valence
        log.dump('Initializing strain')
        with log.section('STRAIN', 3, timer='Initializing'):
            self.strains = [
                None,
            ] * len(valence.terms)
            for term in valence.terms:
                cons_ic = term.ics[0]
                cons_ic_atindexes = term.get_atoms()
                #collect all other ics
                ics = []
                for term2 in valence.terms:
                    #if term2.index == term.index: continue #not the current term ic
                    if term2.kind == 3: continue  #not a cross term ic
                    ics.append(term2.ics[0])
                self.strains[term.index] = Strain(system, cons_ic,
                                                  cons_ic_atindexes, ics)
Example #6
 def gradient(self, X):
     '''
         Compute the gradient of the strain wrt Cartesian coordinates of the
         system. For every ic that needs to be constrained, a Lagrange multiplier
         is included.
     '''
     #small check
     #assert X.shape[0] ==  self.ndof + 1
     #initialize return value
     grad = np.zeros((len(X), ))
     #compute strain gradient
     gstrain = np.zeros(self.coords0.shape)
     self.update_pos(self.coords0 + X[:self.ndof].reshape((-1, 3)))
     self.value = self.compute(gpos=gstrain)
     #compute constraint gradient
     gconstraint = np.zeros(self.coords0.shape)
     self.constraint.update_pos(self.coords0 +
                                X[:self.ndof].reshape((-1, 3)))
     self.constrain_value = self.constraint.compute(gpos=gconstraint) + 1.0
     #construct gradient
     grad[:self.ndof] = gstrain.reshape(
         (-1, )) + X[self.ndof] * gconstraint.reshape((-1, ))
     grad[self.ndof] = self.constrain_value - self.constrain_target
     #cartesian penalty, i.e. extra penalty for deviation w.r.t. cartesian equilibrium coords
     indices = np.array([[3 * i, 3 * i + 1, 3 * i + 2]
                         for i in xrange(self.ndof / 3)
                         if i not in self.cons_ic_atindexes]).ravel()
     if len(indices) > 0:
         grad[indices] += X[indices] / (self.ndof * self.cart_penalty**2)
     with log.section('PTGEN', 4, timer='PT Generate'):
         log.dump('      Gradient:  rms = %.3e  max = %.3e  cnstr = %.3e' %
                  (np.sqrt(
                      (grad[:self.ndof]**2).mean()), max(
                          grad[:self.ndof]), grad[self.ndof]))
     return grad
Example #7
    def do_pt_estimate(self, do_valence=False, logger_level=3):
        '''
            Estimate force constants and rest values from the perturbation
            trajectories

            **Optional Arguments**

            do_valence
                if set to True, the current valence force field will be used to
                estimate the contribution of all other valence terms.
        '''
        with log.section('PTEST', 2, timer='PT Estimate'):
            self.reset_system()
            message = 'Estimating FF parameters from perturbation trajectories'
            if do_valence: message += ' with valence reference'
            log.dump(message)
            ffrefs = self.kwargs.get('ffrefs', [])
            #compute fc and rv from trajectory
            for traj in self.trajectories:
                if traj is None: continue
                self.perturbation.estimate(traj,
                                           self.ai,
                                           ffrefs=ffrefs,
                                           do_valence=do_valence)
            #set force field parameters to computed fc and rv
            for traj in self.trajectories:
                if traj is None: continue
                self.valence.set_params(traj.term.index,
                                        fc=traj.fc,
                                        rv0=traj.rv)
            #output
            self.valence.dump_logger(print_level=logger_level)
Example #8
 def run(self):
     with log.section('PROGRAM', 2):
         self.do_eq_setrv(['EQ_RV'])
         self.do_pt_generate()
         self.do_pt_estimate(energy_noise=self.settings.pert_traj_energy_noise)
         if self.settings.plot_traj is not None and (self.settings.plot_traj.lower() in ['apt1', 'all']):
             self.plot_trajectories(do_valence=False, suffix='_Apt1')
         if self.settings.xyz_traj is not None and self.settings.xyz_traj:
             self.write_trajectories()
         self.do_pt_postprocess()
         self.do_cross_init()
         self.do_hc_estimatefc(['HC_FC_DIAG', 'HC_FC_CROSS_ASS', 'HC_FC_CROSS_ASA'], do_mass_weighting=self.settings.do_hess_mass_weighting)
         if self.settings.plot_traj is not None and (self.settings.plot_traj.lower() in ['bhc1', 'all']):
             self.plot_trajectories(do_valence=True, suffix='_Bhc1')
         self.do_pt_estimate(do_valence=True, energy_noise=self.settings.pert_traj_energy_noise)
         if self.settings.plot_traj is not None and (self.settings.plot_traj.lower() in ['cpt2', 'all']):
             self.plot_trajectories(do_valence=True, suffix='_Cpt2')
         self.do_pt_postprocess()
         if self.settings.consistent_cross_rvs:
             # The rest values of the diagonal terms have been updated from
             # the perturbation trajectories; update the corresponding rest
             # values for the cross terms
             self.update_cross_pars()
         self.do_hc_estimatefc(['HC_FC_DIAG', 'HC_FC_CROSS_ASS', 'HC_FC_CROSS_ASA'], do_mass_weighting=self.settings.do_hess_mass_weighting)
         if self.settings.plot_traj is not None and (self.settings.plot_traj.lower() in ['dhc2', 'all']):
             self.plot_trajectories(do_valence=True, suffix='_Dhc2')
         self.do_hc_estimatefc([
             'HC_FC_CROSS_ASS', 'HC_FC_CROSS_ASA', 'HC_FC_CROSS_DSS',
             'HC_FC_CROSS_DSD', 'HC_FC_CROSS_DAA', 'HC_FC_CROSS_DAD'
         ], logger_level=1, do_mass_weighting=self.settings.do_hess_mass_weighting, do_svd=self.settings.do_cross_svd, svd_rcond=self.settings.cross_svd_rcond)
         self.make_output()
Example #9
    def do_pt_estimate(self, do_valence=False, energy_noise=None, logger_level=3):
        '''
            Estimate force constants and rest values from the perturbation
            trajectories

            **Optional Arguments**

            do_valence
                if set to True, the current valence force field will be used to
                estimate the contribution of all other valence terms.
        '''
        with log.section('PTEST', 2, timer='PT Estimate'):
            self.reset_system()
            message = 'Estimating FF parameters from perturbation trajectories'
            if do_valence: message += ' with valence reference'
            log.dump(message)
            #compute fc and rv from trajectory
            only = self.settings.only_traj
            for traj in self.trajectories:
                if traj is None: continue
                if not (only is None or only=='PT_ALL' or only=='pt_all'):
                    if isinstance(only, str): only = [only]
                    basename = self.valence.terms[traj.term.master].basename
                    if basename not in only: continue
                self.perturbation.estimate(traj, self.ai, ffrefs=self.ffrefs, do_valence=do_valence, energy_noise=energy_noise)
            #set force field parameters to computed fc and rv
            for traj in self.trajectories:
                if traj is None: continue
                if not (only is None or only=='PT_ALL' or only=='pt_all'):
                    if isinstance(only, str): only = [only]
                    basename = self.valence.terms[traj.term.master].basename
                    if basename not in only: continue
                self.valence.set_params(traj.term.index, fc=traj.fc, rv0=traj.rv)
            #output
            self.valence.dump_logger(print_level=logger_level)
Example #10
    def __init__(self, system, ai, settings, ffrefs=[]):
        '''
            **Arguments**

            system
                a Yaff `System` instance defining the system

            ai
                a `Reference` instance corresponding to the ab initio input data

            settings
                a `Settings` instance defining all QuickFF settings

            **Optional Arguments**

            ffrefs
                a list of `Reference` instances defining the a-priori force
                field contributions.
        '''
        with log.section('INIT', 1, timer='Initializing'):
            log.dump('Initializing program')
            self.settings = settings
            self.system = system
            self.ai = ai
            self.ffrefs = ffrefs
            self.valence = ValenceFF(system, settings)
            self.perturbation = RelaxedStrain(system, self.valence, settings)
            self.trajectories = None
            self.print_system()
Example #11
 def do_cross_init(self):
     '''
         Set the rest values of cross terms to the rest values of the
         corresponding diagonal terms. The force constants are initialized
         to zero.
     '''
     with log.section('VAL', 2, 'Initializing'):
         self.reset_system()
         self.valence.init_cross_terms()
         for index in xrange(self.valence.vlist.nv):
             term = self.valence.vlist.vtab[index]
             if term['kind'] != 3: continue
             rv0, rv1 = None, None
             for index2 in xrange(self.valence.vlist.nv):
                 term2 = self.valence.vlist.vtab[index2]
                 if term2['kind'] == 3: continue
                 if term['ic0'] == term2['ic0']:
                     assert rv0 is None
                     rv0 = self.valence.get_params(index2, only='rv')
                 if term['ic1'] == term2['ic0']:
                     assert rv1 is None
                     rv1 = self.valence.get_params(index2, only='rv')
             if rv0 is None or rv1 is None:
                 raise ValueError('No rest values found for %s' %
                                  self.valence.terms[index].basename)
             self.valence.set_params(index, fc=0.0, rv0=rv0, rv1=rv1)
Example #12
 def do_pt_generate(self):
     'Generate perturbation trajectories.'
     with log.section('PTGEN', 2, timer='PT Generate'):
         #read if an existing file was specified through fn_traj
         fn_traj = self.kwargs.get('fn_traj', None)
         if fn_traj is not None and os.path.isfile(fn_traj):
             self.trajectories = cPickle.load(open(fn_traj, 'r'))
             log.dump('Trajectories read from file %s' % fn_traj)
             self.update_trajectory_terms()
             newname = 'updated_' + fn_traj.split('/')[-1]
             cPickle.dump(self.trajectories, open(newname, 'w'))
             return
         #configure
         self.reset_system()
         only = self.kwargs.get('only_traj', 'PT_ALL')
         if isinstance(only, str):
             do_terms = [
                 term for term in self.valence.terms if only in term.tasks
             ]
         else:
             do_terms = []
             for pattern in only:
                 for term in self.valence.iter_terms(pattern):
                     do_terms.append(term)
         trajectories = self.perturbation.prepare(do_terms)
         #compute
         log.dump('Constructing trajectories')
         self.trajectories = paracontext.map(
             self.perturbation.generate,
             [traj for traj in trajectories if traj.active])
         #write the trajectories to the non-existing file fn_traj
         if fn_traj is not None:
             assert not os.path.isfile(fn_traj)
             cPickle.dump(self.trajectories, open(fn_traj, 'w'))
             log.dump('Trajectories stored to file %s' % fn_traj)
Example #13
 def do_pt_generate(self):
     '''
         Generate perturbation trajectories.
     '''
     with log.section('PTGEN', 2, timer='PT Generate'):
         #read if an existing file was specified through fn_traj
         fn_traj = self.settings.fn_traj
         if fn_traj is not None and os.path.isfile(fn_traj):
             self.trajectories = pickle.load(open(fn_traj, 'rb'))
             log.dump('Trajectories read from file %s' %fn_traj)
             self.update_trajectory_terms()
             newname = 'updated_'+fn_traj.split('/')[-1]
             pickle.dump(self.trajectories, open(newname, 'wb'))
             return
         #configure
         self.reset_system()
         only = self.settings.only_traj
         if only is None or only=='PT_ALL' or only=='pt_all':
             do_terms = [term for term in self.valence.terms if term.kind in [0,2,11,12]]
         else:
             if isinstance(only, str): only = [only]
             do_terms = []
             for pattern in only:
                 for term in self.valence.iter_terms(pattern):
                     if term.kind in [0,2,11,12]:
                         do_terms.append(term)
         trajectories = self.perturbation.prepare(do_terms)
         #compute
         log.dump('Constructing trajectories')
         self.trajectories = paracontext.map(self.perturbation.generate, [traj for traj in trajectories if traj.active])
         #write the trajectories to the non-existing file fn_traj
         if fn_traj is not None:
             assert not os.path.isfile(fn_traj)
             pickle.dump(self.trajectories, open(fn_traj, 'wb'))
             log.dump('Trajectories stored to file %s' %fn_traj)
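The read-or-generate logic above (load pickled trajectories when `fn_traj` already exists, otherwise generate them and pickle the result) is a generic caching idiom. A stripped-down sketch of the same pattern, independent of QuickFF:

import os
import pickle

def load_or_compute(fn, compute):
    #return the pickled result if fn exists, otherwise compute, store and return it
    if fn is not None and os.path.isfile(fn):
        with open(fn, 'rb') as f:
            return pickle.load(f)
    result = compute()
    if fn is not None:
        with open(fn, 'wb') as f:
            pickle.dump(result, f)
    return result

#hypothetical usage:
#trajectories = load_or_compute('trajectories.pp', lambda: generate_all_trajectories())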
Example #14
    def update_cross_pars(self):
        '''
            Set the rest values of cross terms to the rest values of the
            corresponding diagonal terms. Set the force constants to zero.
        '''
        with log.section('VAL', 2, 'Initializing'):

            def find_rest_value(iterm):
                term = self.valence.terms[iterm]
                if term.basename.startswith(
                        'TorsCheby') or term.basename.startswith('BendCheby'):
                    return -self.valence.get_params(iterm, only='sign')
                else:
                    return self.valence.get_params(iterm, only='rv')

            # Bond-Bond Cross terms
            cases = [('Cross', 'bb', 3), ('Cross', 'b0a', 3),
                     ('Cross', 'b1a', 3)]

            # Bond-Dihedral Cross terms
            for m in [1, 2, 3, 4, 6]:
                for suffix in ['bb', 'b0d', 'b1d', 'b2d']:
                    case = ('CrossBondDih%i' % m, suffix, 4)
                    cases.append(case)

            # Angle-Dihedral Cross terms
            for m in [1, 2, 3, 4, 6]:
                for suffix in ['aa', 'a0d', 'a1d']:
                    case = ('CrossBendDih%i' % m, suffix, 4)
                    cases.append(case)

                for suffix in ['a0d', 'a1d']:
                    case = ('CrossCBendDih%i' % m, suffix, 4)
                    cases.append(case)

            # Loop over all cases
            for prefix, suffix, ntypes in cases:
                # Loop over all cross terms belonging to this case
                for term in self.valence.iter_masters('^%s/.*/%s$' % (prefix, suffix), use_re=True):
                    types = term.basename.split('/')[1].split('.')
                    assert len(types) == ntypes, 'Found cross term with %d atom types, expected %d' % (len(types), ntypes)
                    rv0 = find_rest_value(term.diag_term_indexes[0])
                    rv1 = find_rest_value(term.diag_term_indexes[1])
                    self.valence.set_params(term.index, fc=0.0, rv0=rv0, rv1=rv1)
                    for index in term.slaves:
                        self.valence.set_params(index, fc=0.0, rv0=rv0, rv1=rv1)
Example #15
 def gradient(self, X):
     '''
         Compute the gradient of the strain w.r.t. Cartesian coordinates of
         the system. For the ic that needs to be constrained, a Lagrange
         multiplier is included.
     '''
     #initialize return value
     grad = np.zeros((len(X),))
     #compute strain gradient
     gstrain = np.zeros(self.coords0.shape)
     self.update_pos(self.coords0 + X[:self.ndof].reshape((-1,3)))
     self.value = self.compute(gpos=gstrain)
     #compute constraint gradient
     gconstraint = np.zeros(self.coords0.shape)
     self.constraint.update_pos(self.coords0 + X[:self.ndof].reshape((-1,3)))
     self.constrain_value = self.constraint.compute(gpos=gconstraint) + 1.0
     #construct gradient
     grad[:self.ndof] = gstrain.reshape((-1,)) + X[self.ndof]*gconstraint.reshape((-1,))
     grad[self.ndof] = self.constrain_value - self.constrain_target
     #cartesian penalty, i.e. extra penalty for deviation w.r.t. cartesian equilibrium coords
     indices = np.array([[3*i,3*i+1,3*i+2] for i in range(self.ndof//3) if i not in self.cons_ic_atindexes]).ravel()
     if len(indices)>0:
         grad[indices] += X[indices]/(self.ndof*self.cart_penalty**2)
     with log.section('PTGEN', 4, timer='PT Generate'):
         log.dump('      Gradient:  rms = %.3e  max = %.3e  cnstr = %.3e' %(np.sqrt((grad[:self.ndof]**2).mean()), max(grad[:self.ndof]), grad[self.ndof]))
     return grad
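To make the augmented-gradient layout above concrete: the first `ndof` entries combine the strain gradient with the Lagrange multiplier times the constraint gradient, and the last entry is the constraint residual. A self-contained toy (not QuickFF code) that feeds the same layout to `scipy.optimize.fsolve`, just as the trajectory generator further down this page does:

import numpy as np
import scipy.optimize

#toy problem: minimize the 'strain' 0.5*||x - x0||^2 subject to x[0] == target
x0 = np.array([1.0, 2.0, 3.0])
target = 1.5

def gradient(X):
    x, lam = X[:3], X[3]
    grad = np.zeros(4)
    grad[:3] = x - x0        #strain gradient
    grad[0] += lam           #plus lambda times the constraint gradient d(x[0])/dx
    grad[3] = x[0] - target  #constraint residual
    return grad

sol = scipy.optimize.fsolve(gradient, np.zeros(4))
print(sol)  #x = [1.5, 2.0, 3.0], lambda = -0.5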
Example #16
 def print_system(self):
     '''
         dump overview of atoms (and associated parameters) in the system
     '''
     with log.section('SYS', 3, timer='Initializing'):
         log.dump('Atomic configuration of the system:')
         log.dump('')
         log.dump(
             '  index  |  x [A]  |  y [A]  |  z [A]  | ffatype |    q    |  R [A]  '
         )
         log.dump(
             '---------------------------------------------------------------------'
         )
         for i in range(len(self.system.numbers)):
             x, y, z = self.system.pos[i, 0], self.system.pos[
                 i, 1], self.system.pos[i, 2]
             if self.system.charges is not None:
                 q = self.system.charges[i]
             else:
                 q = np.nan
             if self.system.radii is not None:
                 R = self.system.radii[i]
             else:
                 R = np.nan
             log.dump(
                 '  %4i   | % 7.3f | % 7.3f | % 7.3f |  %6s | % 7.3f | % 7.3f '
                 % (i, x / angstrom, y / angstrom, z / angstrom,
                    self.system.ffatypes[self.system.ffatype_ids[i]], q,
                    R / angstrom))
Example #17
 def run(self):
     with log.section('PROGRAM', 2):
         fn_traj = self.kwargs.get('fn_traj', None)
         assert fn_traj is not None, 'It is useless to run the MakeTrajectories program without specifying a trajectory filename fn_traj!'
         assert not os.path.isfile(
             fn_traj
         ), 'Given file %s to store trajectories to already exists!' % fn_traj
         self.do_pt_generate()
Example #18
 def do_cross_init(self):
     '''
         Add cross terms to the valence list and initialize parameters.
     '''
     with log.section('VAL', 2, 'Initializing'):
         self.reset_system()
         self.valence.init_cross_angle_terms()
         if self.settings.do_cross_DSS or self.settings.do_cross_DSD or self.settings.do_cross_DAD or self.settings.do_cross_DAA:
             self.valence.init_cross_dihed_terms()
         self.update_cross_pars()
Example #19
 def run(self):
     with log.section('PROGRAM', 2):
         self.do_eq_setrv(['EQ_RV'])
         self.do_pt_generate()
         self.do_pt_estimate()
         self.do_pt_postprocess()
         self.do_cross_init()
         self.do_hc_estimatefc(['HC_FC_DIAG', 'HC_FC_CROSS'],
                               logger_level=1)
         self.make_output()
Example #20
 def run(self):
     with log.section('PROGRAM', 2):
         self.do_eq_setrv(['EQ_RV'])
         self.do_pt_generate()
         self.do_pt_estimate()
         self.do_pt_postprocess()
         self.do_hc_estimatefc(['HC_FC_DIAG'])
         self.do_pt_estimate(do_valence=True)
         self.do_hc_estimatefc(['HC_FC_DIAG'], logger_level=1)
         self.make_output()
Example #21
    def __init__(self, system, ai, **kwargs):
        '''
            **Arguments**

            system
                a Yaff `System` object defining the system

            ai
                a `Reference` instance corresponding to the ab initio input data

            **Keyword Arguments**

            ffrefs
                a list of `Reference` objects corresponding to a priori determined
                contributions to the force field (e.g. electrostatics
                or van der Waals contributions)

            fn_yaff
                the name of the file to write the final parameters to in Yaff
                format. The default is `pars.txt`.

            fn_sys
                the name of the file to write the system to. The default is
                `system.chk`.

            fn_traj
                a cPickle filename to read/write the perturbation trajectories
                from/to. If the file exists, the trajectories are read from the
                file. If the file does not exist, the trajectories are written
                to the file.

            only_traj
                specifier to determine for which terms a perturbation trajectory
                needs to be constructed. If ONLY_TRAJ is a single string, it is
                interpreted as a task (only terms that have this task in their
                tasks attribute will get a trajectory). If ONLY_TRAJ is a list
                of strings, each string is interpreted as the basename of the
                term for which a trajectory will be constructed.

            plot_traj
                if set to True, all energy contributions along each perturbation
                trajectory will be plotted using the final force field.

            xyz_traj
                if set to True, each perturbation trajectory will be written to
                an XYZ file.
        '''
        with log.section('PROG', 2, timer='Initializing'):
            log.dump('Initializing program')
            self.system = system
            self.ai = ai
            self.kwargs = kwargs
            self.valence = ValenceFF(system)
            self.perturbation = RelaxedStrain(system, self.valence)
            self.trajectories = None
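Going by the keyword arguments documented in this older, kwargs-based constructor, a call would look roughly as follows. This is only a sketch: the class name `Program`, the `ei_ref` object and the file names are placeholders, not taken from this page.

#hypothetical usage of the kwargs-based constructor documented above
program = Program(
    system, ai,
    ffrefs=[ei_ref],            #a priori electrostatic/vdW Reference instances (assumed)
    fn_yaff='pars_cov.txt',     #final Yaff parameter file
    fn_sys='system.chk',        #final system file
    fn_traj='trajectories.pp',  #cPickle cache for the perturbation trajectories
    only_traj='PT_ALL',         #a task string, or a list of term basenames
    plot_traj=True,
    xyz_traj=True,
)
program.run()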
Example #22
 def run(self):
     with log.section('PROGRAM', 2):
         fn_traj = self.settings.fn_traj
         assert fn_traj is not None, 'The PlotTrajectories program requires a trajectory filename fn_traj!'
         assert os.path.isfile(fn_traj), 'Given file %s to read trajectories does not exist!' %fn_traj
         self.settings.set('xyz_traj', True)
         self.settings.set('plot_traj', 'all')
         self.do_pt_generate()
         self.do_pt_estimate()
         self.plot_trajectories(suffix='_Apt1')
         self.write_trajectories()
Example #23
 def write_trajectories(self):
     '''
         Write perturbation trajectories to XYZ files.
     '''
     only = self.settings.only_traj
     if not isinstance(only, list): only = [only]
     with log.section('XYZ', 3, timer='PT dump XYZ'):
         for trajectory in self.trajectories:
             if trajectory is None: continue
             for pattern in only:
                 if pattern == 'PT_ALL' or pattern in trajectory.term.basename:
                     trajectory.to_xyz()
Example #24
 def run(self):
     with log.section('PROGRAM', 2):
         #deriving diagonal force field
         self.do_pt_generate()
         self.do_pt_estimate()
         self.average_pars()
         self.do_hc_estimatefc(['HC_FC_DIAG'])
         #adding and fitting cross terms
         self.do_cross_init()
         self.do_hc_estimatefc(['HC_FC_CROSS_ASS', 'HC_FC_CROSS_ASA'], logger_level=1)
         #write output
         self.make_output()
Example #25
 def run(self):
     with log.section('PROGRAM', 2):
         fn_traj = self.kwargs.get('fn_traj', None)
         assert fn_traj is not None, 'The PlotTrajectories program requires a trajectory filename fn_traj!'
         assert os.path.isfile(
             fn_traj
         ), 'Given file %s to read trajectories does not exist!' % fn_traj
         self.kwargs['xyz_traj'] = True
         self.kwargs['plot_traj'] = True
         self.do_pt_generate()
         self.do_pt_estimate()
         self.plot_trajectories()
Example #26
 def init_oop_terms(self, thresshold_zero=5e-2*angstrom):
     '''
         Initialize all out-of-plane terms in the system based on the oops
         attribute of the system instance. All oops are given harmonic
         potentials.
     '''
     with log.section('VAL', 3, 'Initializing'):
         #get all dihedrals
         from molmod.ic import opbend_dist, _opdist_low
         ffatypes = [self.system.ffatypes[fid] for fid in self.system.ffatype_ids]
         opdists = {}
         for opdist in self.system.iter_oops():
             opdist, types = term_sort_atypes(ffatypes, opdist, 'opdist')
             if types in opdists.keys():
                 opdists[types].append(opdist)
             else:
                 opdists[types] = [opdist]
         #loop over all distinct opdist types
         nharm = 0
         nsq = 0
         for types, oops in opdists.iteritems():
             d0s = np.zeros(len(oops), float)
             for i, oop in enumerate(oops):
                 if self.system.cell.nvec>0:
                     d01 = self.system.pos[oop[1]]-self.system.pos[oop[0]]
                     d02 = self.system.pos[oop[2]]-self.system.pos[oop[0]]
                     d03 = self.system.pos[oop[3]]-self.system.pos[oop[0]]
                     self.system.cell.mic(d01)
                     self.system.cell.mic(d02)
                     self.system.cell.mic(d03)
                     d0s[i] = abs(_opdist_low(d01, d02, d03, 0)[0])
                 else:
                     rs = np.array([#mind the order, is(or was) wrongly documented in molmod
                         self.system.pos[oop[0]],
                         self.system.pos[oop[1]],
                         self.system.pos[oop[2]],
                         self.system.pos[oop[3]],
                     ])
                     d0s[i] = abs(opbend_dist(rs)[0])
             if d0s.mean()<thresshold_zero: #TODO: check this thresshold
                 #add regular term harmonic in oopdist
                 for oop in oops:
                     term = self.add_term(Harmonic, [OopDist(*oop)], types, ['HC_FC_DIAG'], ['kjmol/A**2', 'A'])
                     self.set_params(term.index, rv0=0.0)
                     nharm += 1
             else:
                 #add term harmonic in square of oopdist
                 log.dump('Mean absolute value of OopDist %s is %.3e A, used SQOOPDIST' %('.'.join(types), d0s.mean()/angstrom))
                 for oop in oops:
                     self.add_term(Harmonic, [SqOopDist(*oop)], types, ['PT_ALL', 'HC_FC_DIAG'], ['kjmol/A**4', 'A**2'])
                     nsq += 1
     log.dump('Added %i Harmonic and %i SquareHarmonic out-of-plane distance terms' %(nharm, nsq))
Example #27
 def write_trajectories(self):
     '''
         Write perturbation trajectories to XYZ files.
     '''
     only = self.settings.only_traj
     if not isinstance(only, list): only = [only]
     with log.section('XYZ', 3, timer='PT dump XYZ'):
         for trajectory in self.trajectories:
             if trajectory is None: continue
             for pattern in only:
                 if pattern=='PT_ALL' or pattern in trajectory.term.basename:
                     log.dump('Writing XYZ trajectory for %s' %trajectory.term.basename)
                     trajectory.to_xyz()
Example #28
 def update_trajectory_terms(self):
     '''
         Routine to make ``self.valence.terms`` and the term attribute of each
         trajectory in ``self.trajectories`` consistent again. This is useful
         if the trajectories were read from a file and the ``valenceFF`` instance
         was modified.
     '''
     log.dump('Updating terms of trajectories to current valenceFF terms')
     with log.section('PTUPD', 3):
         #update the terms in the trajectories to match the terms in
         #self.valence
         for traj in self.trajectories:
             found = False
             for term in self.valence.iter_terms():
                 if traj.term.get_atoms() == term.get_atoms():
                     if found:
                         raise ValueError(
                             'Found two terms for trajectory %s with atom indices %s'
                             % (traj.term.basename,
                                str(traj.term.get_atoms())))
                     traj.term = term
                     if 'PT_ALL' not in term.tasks:
                         log.dump(
                             'PT_ALL not in tasks of %s-%i, deactivated PT'
                             % (term.basename, term.index))
                         traj.active = False
                     found = True
             if not found:
                 log.warning(
                     'No term found for trajectory %s with atom indices %s, deactivating trajectory'
                     % (traj.term.basename, str(traj.term.get_atoms())))
                 traj.active = False
         #check if every term with task PT_ALL has a trajectory associated
         #with it. If a trajectory is missing, generate it.
         for term in self.valence.iter_terms():
             if 'PT_ALL' not in term.tasks: continue
             found = False
             for traj in self.trajectories:
                 if term.get_atoms() == traj.term.get_atoms():
                     if found:
                         raise ValueError(
                             'Found two trajectories for term %s with atom indices %s'
                             % (term.basename, str(term.get_atoms())))
                     found = True
             if not found:
                 log.warning(
                     'No trajectory found for term %s with atom indices %s. Generating it now.'
                     % (term.basename, str(term.get_atoms())))
                 trajectory = self.perturbation.prepare([term])[0]
                 self.perturbation.generate(trajectory)
                 self.trajectories.append(trajectory)
Example #29
    def do_pt_postprocess(self):
        '''
            Do some first post processing of the ff parameters estimated from
            the perturbation trajectories including:

                * detecting bend patterns with rest values of 90 and 180 deg
                * detecting bend patterns with rest values only close to 180 deg
                * averaging parameters
        '''
        with log.section('PTPOST', 2, timer='PT Post process'):
            self.do_squarebend()
            self.do_bendcharm()
            #self.do_sqoopdist_to_oopdist()
            self.average_pars()
Example #30
 def plot_trajectories(self, do_valence=False, suffix=''):
     '''
         Plot energy contributions along perturbation trajectories.
     '''
     only = self.settings.only_traj
     if not isinstance(only, list): only = [only]
     with log.section('PLOT', 3, timer='PT plot energy'):
         valence = None
         if do_valence: valence=self.valence
         for trajectory in self.trajectories:
             if trajectory is None: continue
             for pattern in only:
                 if pattern=='PT_ALL' or pattern in trajectory.term.basename:
                     log.dump('Plotting trajectory for %s' %trajectory.term.basename)
                     trajectory.plot(self.ai, ffrefs=self.ffrefs, valence=valence, suffix=suffix)
Example #31
 def dump_logger(self, print_level=1):
     if log.log_level < print_level: return
     with log.section('', print_level):
         sequence = [
             'bondharm', 'bendaharm', 'bendcharm', 'bendcos', 'torsion',
             'torsc2harm', 'dihedharm', 'oopdist', 'cross'
         ]
         log.dump('')
         for label in sequence:
             lines = []
             for term in self.iter_masters(label=label):
                 lines.append(term.to_string(self))
             for line in sorted(lines):
                 log.dump(line)
                 log.dump('')
Example #32
 def init_bond_terms(self):
     '''
         Initialize all bond terms in the system based on the bonds attribute
         of the system instance. All bond terms are given harmonic
         potentials.
     '''
     with log.section('VAL', 3, 'Initializing'):
         ffatypes = [self.system.ffatypes[fid] for fid in self.system.ffatype_ids]
         #get the bond terms
         nbonds = 0
         for bond in self.system.iter_bonds():
             bond, types = term_sort_atypes(ffatypes, bond, 'bond')
             units = ['kjmol/A**2', 'A']
             self.add_term(Harmonic, [Bond(*bond)], types, ['PT_ALL', 'HC_FC_DIAG'], units)
             nbonds += 1
     log.dump('Added %i Harmonic bond terms' %nbonds)
Example #33
 def init_bend_terms(self):
     '''
         Initialize all bend terms in the system based on the bends attribute
         of the system instance. All bend terms are given harmonic
         potentials.
     '''
     with log.section('VAL', 3, 'Initializing'):
         ffatypes = [self.system.ffatypes[fid] for fid in self.system.ffatype_ids]
         #get the angle terms
         nbends = 0
         for angle in self.system.iter_angles():
             angle, types = term_sort_atypes(ffatypes, angle, 'angle')
             units = ['kjmol/rad**2', 'deg']
             self.add_term(Harmonic, [BendAngle(*angle)], types, ['PT_ALL', 'HC_FC_DIAG'], units)
             nbends += 1
     log.dump('Added %i Harmonic bend terms' %nbends)
Example #34
    def do_pt_postprocess(self):
        '''
            Do some first post processing of the ff parameters estimated from
            the perturbation trajectories including:

                * detecting bend patterns with rest values of 90 and 180 deg
                * detecting bend patterns with rest values only close to 180 deg
                * transforming SqOopDist with rv=0.0 to OopDist
                * averaging parameters
        '''
        with log.section('PTPOST', 2, timer='PT Post process'):
            if self.settings.do_squarebend:
                self.do_squarebend()
            if self.settings.do_bendclin:
                self.do_bendclin()
            if self.settings.do_sqoopdist_to_oopdist:
                self.do_sqoopdist_to_oopdist()
            self.average_pars()
Example #35
    def __init__(self, fn=None, **kwargs):
        '''
            **Keyword Arguments**

            fn
                file name of a config file from which settings can be read.
                Each line contains a single setting (except the lines starting
                with a #) and should have the following syntax

                    key: value

            kwargs
                each setting can also be passed directly through the use of
                keyword arguments in the __init__ constructor with the syntax
                key=value. The settings passed through the use of kwargs
                overwrite those in the config file.

        '''
        #first read general RC settings from .quickffrc file
        from .context import context
        self.read_config_file(context.get_fn('quickffrc'))
        #if a config file is provided, read settings from this file and
        #overwrite the general RC settings
        if fn is not None:
            self.read_config_file(fn)
        #if settings are defined through keyword arguments to this init
        #constructor, read these settings and overwrite general RC as
        #well as config file settings
        for key, value in kwargs.items():
            #don't impose keyword argument that hasn't been set
            if value is None: continue
            key = key.lstrip().rstrip()
            if key=='suffix':
                continue
            self.set(key, value)
        if 'suffix' in list(kwargs.keys()) and kwargs['suffix'] is not None:
            self._set_suffix(kwargs['suffix'])
        self.check()
        self._set_log()

        with log.section('SETT', 2, 'Initializing'):
            self.dump_log()
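As an illustration of the `key: value` syntax described above, the sketch below writes a small config file and reads it back through `Settings`. The keys are borrowed from settings referenced in other examples on this page; whether each of them is accepted by a given QuickFF version is an assumption.

#a sketch: write a minimal config file and read it back through Settings
cfg = '''
#QuickFF settings (keys borrowed from other examples on this page)
fn_yaff: pars_cov.txt
plot_traj: all
xyz_traj: True
'''
with open('quickff_config.txt', 'w') as f:
    f.write(cfg)

#keyword arguments passed here overwrite the values read from the file
settings = Settings(fn='quickff_config.txt', xyz_traj=False)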
Example #36
    def update_cross_pars(self):
        '''
            Set the rest values of cross terms to the rest values of the
            corresponding diagonal terms. Set the force constants to zero.
        '''
        with log.section('VAL', 2, 'Initializing'):
            def find_rest_value(iterm):
                term = self.valence.terms[iterm]
                if term.basename.startswith('TorsCheby') or term.basename.startswith('BendCheby'):
                    return -self.valence.get_params(iterm, only='sign')
                else:
                    return self.valence.get_params(iterm, only='rv')

            # Bond-Bond Cross terms
            cases = [('Cross','bb',3),('Cross','b0a',3),('Cross','b1a',3)]

            # Bond-Dihedral Cross terms
            for m in [1,2,3,4,6]:
                for suffix in ['bb','b0d','b1d','b2d']:
                    case = ('CrossBondDih%i'%m,suffix,4)
                    cases.append(case)

            # Angle-Dihedral Cross terms
            for m in [1,2,3,4,6]:
                for suffix in ['aa','a0d','a1d']:
                    case = ('CrossBendDih%i'%m,suffix,4)
                    cases.append(case)

                for suffix in ['a0d','a1d']:
                    case = ('CrossCBendDih%i'%m,suffix,4)
                    cases.append(case)

            # Loop over all cases
            for prefix, suffix, ntypes in cases:
                # Loop over all cross terms belonging to this case
                for term in self.valence.iter_masters('^%s/.*/%s$'%(prefix,suffix), use_re=True):
                    types = term.basename.split('/')[1].split('.')
                    assert len(types)==ntypes, 'Found cross term with %d atom types, expected %d'%(len(types),ntypes)
                    rv0 = find_rest_value(term.diag_term_indexes[0])
                    rv1 = find_rest_value(term.diag_term_indexes[1])
                    self.valence.set_params(term.index, fc=0.0, rv0=rv0, rv1=rv1)
                    for index in term.slaves: self.valence.set_params(index, fc=0.0, rv0=rv0, rv1=rv1)
Example #37
 def update_trajectory_terms(self):
     '''
         Routine to make ``self.valence.terms`` and the term attribute of each
         trajectory in ``self.trajectories`` consistent again. This is useful
         if the trajectories were read from a file and the ``valenceFF`` instance
         was modified.
     '''
     log.dump('Updating terms of trajectories to current valenceFF terms')
     with log.section('PTUPD', 3):
         #update the terms in the trajectories to match the terms in
         #self.valence
         for traj in self.trajectories:
             found = False
             for term in self.valence.iter_terms():
                 if traj.term.get_atoms()==term.get_atoms():
                     if found: raise ValueError('Found two terms for trajectory %s with atom indices %s' %(traj.term.basename, str(traj.term.get_atoms())))
                     traj.term = term
                     if 'PT_ALL' not in term.tasks:
                         log.dump('PT_ALL not in tasks of %s-%i, deactivated PT' %(term.basename, term.index))
                         traj.active = False
                     found = True
             if not found:
                 log.warning('No term found for trajectory %s with atom indices %s, deactivating trajectory' %(traj.term.basename, str(traj.term.get_atoms())))
                 traj.active = False
         #check if every term with task PT_ALL has a trajectory associated
         #with it. If a trajectory is missing, generate it.
         for term in self.valence.iter_terms():
             if 'PT_ALL' not in term.tasks: continue
             found = False
             for traj in self.trajectories:
                 if term.get_atoms()==traj.term.get_atoms():
                     if found: raise ValueError('Found two trajectories for term %s with atom indices %s' %(term.basename, str(term.get_atoms())))
                     found = True
             if not found:
                 log.warning('No trajectory found for term %s with atom indices %s. Generating it now.' %(term.basename, str(term.get_atoms())))
                 trajectory = self.perturbation.prepare([term])[term.index]
                 self.perturbation.generate(trajectory)
                 self.trajectories.append(trajectory)
Example #38
 def print_system(self):
     '''
         dump overview of atoms (and associated parameters) in the system
     '''
     with log.section('SYS', 3, timer='Initializing'):
         log.dump('Atomic configuration of the system:')
         log.dump('')
         log.dump('  index  |  x [A]  |  y [A]  |  z [A]  | ffatype |    q    |  R [A]  ')
         log.dump('---------------------------------------------------------------------')
         for i in range(len(self.system.numbers)):
             x, y, z = self.system.pos[i,0], self.system.pos[i,1], self.system.pos[i,2]
             if self.system.charges is not None:
                 q = self.system.charges[i]
             else:
                 q = np.nan
             if self.system.radii is not None:
                 R = self.system.radii[i]
             else:
                 R = np.nan
             log.dump('  %4i   | % 7.3f | % 7.3f | % 7.3f |  %6s | % 7.3f | % 7.3f ' %(
                 i, x/angstrom, y/angstrom, z/angstrom,
                 self.system.ffatypes[self.system.ffatype_ids[i]],
                 q, R/angstrom
             ))
Example #39
class Program(BaseProgram):
    def run(self):
        with log.section('PROGRAM', 2):
            #deriving diagonal force field
            self.do_pt_generate()
            self.do_pt_estimate()
            self.average_pars()
            self.do_hc_estimatefc(['HC_FC_DIAG'])
            #adding and fitting cross terms
            self.do_cross_init()
            self.do_hc_estimatefc(['HC_FC_CROSS_ASS', 'HC_FC_CROSS_ASA'], logger_level=1)
            #write output
            self.make_output()

#settings
with log.section('INIT', 2, timer='Initialization'):
    settings = Settings(fn_yaff='pars_cov.txt', plot_traj='All', xyz_traj=True)

#load Gaussian Formatted Checkpoint file
fchk = FCHKFile('gaussian.fchk')
numbers = fchk.fields.get('Atomic numbers')
energy = fchk.fields.get('Total Energy')
coords = fchk.fields.get('Current cartesian coordinates').reshape([len(numbers), 3])
grad = fchk.fields.get('Cartesian Gradient').reshape([len(numbers), 3])
hess = fchk.get_hessian().reshape([len(numbers), 3, len(numbers), 3])

#Construct Yaff System file
system = System(numbers, coords)
system.detect_bonds()
system.set_standard_masses()
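The script above stops after building the Yaff system. A typical continuation, sketched here with the import path and the `SecondOrderTaylor` signature assumed from the QuickFF package rather than taken from this page, wraps the ab initio data in a reference object and runs the program defined above:

#continuation sketch; the import path and SecondOrderTaylor signature are assumptions
from quickff.reference import SecondOrderTaylor

#wrap the ab initio energy, gradient and Hessian in a Reference object
ai = SecondOrderTaylor('ai', coords=system.pos.copy(), energy=energy, grad=grad, hess=hess)

#derive the covalent force field and write the output files
program = Program(system, ai, settings)
program.run()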
Example #40
 def dump_log(self):
     sorted_keys = sorted(self.__dict__.keys())
     with log.section('SETT', 3):
         for key in sorted_keys:
             value = str(self.__dict__[key])
             log.dump('%s  %s' %(key+' '*(30-len(key)), value))
Example #41
    def generate(self, trajectory, remove_com=True):
        '''
            Method to calculate the perturbation trajectory, i.e. the trajectory
            that scans the geometry along the direction of the ic figuring in
            the term with the given index (should be a diagonal term). This
            method should be implemented in the derived classes.

            **Arguments**

            trajectory
                instance of Trajectory class representing the perturbation
                trajectory

            **Optional Arguments**

            remove_com
                if set to True, removes the center of mass translation from the
                resulting perturbation trajectories [default=True].
        '''
        #TODO: find out why system.cell is not parsed correctly when using scoop
        #force correct rvecs
        self.system0.cell.update_rvecs(self.system_rvecs)
        with log.section('PTGEN', 4, timer='PT Generate'):
            log.dump('  Generating %s(atoms=%s)' %(trajectory.term.basename, trajectory.term.get_atoms()))
            strain = Strain(self.system, trajectory.term, self.valence.terms)
            natom = self.system0.natom
            q0 = self.valence.iclist.ictab[self.valence.vlist.vtab[trajectory.term.index]['ic0']]['value']
            diag = np.array([0.1*angstrom,]*3*natom+[abs(q0-trajectory.targets[0])])
            sol = None
            for iq, target in enumerate(trajectory.targets):
                log.dump('    Frame %i (target=%.3f)' %(iq, target))
                strain.constrain_target = target
                if abs(target-q0)<1e-6:
                    sol = np.zeros([strain.ndof+1],float)
                    #call strain.gradient once to compute/store/log relevant information
                    strain.gradient(sol)
                else:
                    if sol is not None:
                        init = sol.copy()
                    else:
                        init = np.zeros([3*natom+1], float)
                    init[-1] = np.sign(q0-target)
                    sol, infodict, ier, mesg = scipy.optimize.fsolve(strain.gradient, init, xtol=self.settings.pert_traj_tol, full_output=True, diag=diag)
                    if ier!=1:
                        #fsolve did not converge, try again after adding small random noise
                        log.dump('      %s' %mesg.replace('\n', ' '))
                        log.dump('    Frame %i (target=%.3f) %s(%s) did not converge. Trying again with slightly perturbed initial conditions.' %(
                            iq, target, trajectory.term.basename, trajectory.term.get_atoms()
                        ))
                        #try one more time
                        init = sol.copy()
                        init[:3*natom] += np.random.normal(0.0, 0.01, [3*natom])*angstrom
                        sol, infodict, ier, mesg = scipy.optimize.fsolve(strain.gradient, init, xtol=self.settings.pert_traj_tol, full_output=True, diag=diag)
                        #fsolve did STILL not converge, flag this frame for deletion
                        if ier!=1:
                            log.dump('      %s' %mesg.replace('\n', ' '))
                            log.dump('    Frame %i (target=%.3f) %s(%s) STILL did not converge.' %(
                                iq, target, trajectory.term.basename, trajectory.term.get_atoms()
                            ))
                            trajectory.targets[iq] = np.nan
                            continue
                x = self.system0.pos.copy() + sol[:3*natom].reshape((-1,3))
                trajectory.values[iq] = strain.constrain_value
                log.dump('    Converged (value=%.3f, lagmult=%.3e)' %(strain.constrain_value,sol[3*natom]))
                if remove_com:
                    com = (x.T*self.system0.masses.copy()).sum(axis=1)/self.system0.masses.sum()
                    for i in range(natom):
                        x[i,:] -= com
                trajectory.coords[iq,:,:] = x
            #delete flagged frames
            targets = []
            values = []
            coords = []
            for target, value, coord in zip(trajectory.targets, trajectory.values, trajectory.coords):
                if not np.isnan(target):
                    targets.append(target)
                    values.append(value)
                    coords.append(coord)
            trajectory.targets = np.array(targets)
            trajectory.values = np.array(values)
            trajectory.coords = np.array(coords)
        return trajectory
Example #42
    def estimate(self, trajectory, ai, ffrefs=[], do_valence=False, energy_noise=None, Nerrorsteps=100):
        '''
            Method to estimate the FF parameters for the relevant ic from the
            given perturbation trajectory by fitting a harmonic potential to the
            covalent energy along the trajectory.

            **Arguments**

            trajectory
                a Trajectory instance representing the perturbation trajectory

            ai
                an instance of the Reference representing the ab initio input

            **Optional Arguments**

            ffrefs
                a list of Reference instances representing possible a priori
                determined contributions to the force field (e.g.
                electrostatics and van der Waals)

            do_valence
                If set to True, the current valence force field (stored in
                self.valence) will be used to compute the valence contribution

            energy_noise
                If set to a float, the parabolic fitting will be repeated
                Nerrorsteps times with normal noise added on top of the
                reference values. The noise has a mean of 0 and a standard
                deviation equal to energy_noise. The resulting fits yield a
                distribution of force constants and rest values instead of a
                single value; the std is used to identify bad estimates, while
                the mean is used for the actual FF parameters. If set to None
                (the default), the parabolic fit is performed only once without
                any noise.
        '''
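        # A minimal sketch of the harmonic fit performed below. Assuming fitpar
        # returns the polynomial coefficients [A, B, C] of the parabola
        # A*q**2 + B*q + C fitted to the (shifted) residual energy AI-FF-RES,
        # identification with 0.5*k*(q - q0)**2 + const gives
        #   k  = 2*A
        #   q0 = -B/(2*A)
        # which is exactly how fc and rv are assigned further down.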
        with log.section('PTEST', 3, timer='PT Estimate'):
            term = trajectory.term
            index = term.index
            basename = term.basename
            if not trajectory.__dict__.get('active', True):
                log.dump('Trajectory of %s was deactivated: skipping' %(basename))
                return
            qs = trajectory.values.copy()
            AIs = np.zeros(len(trajectory.coords))
            FFs = np.zeros(len(trajectory.coords))
            RESs = np.zeros(len(trajectory.coords))
            for istep, pos in enumerate(trajectory.coords):
                AIs[istep] = ai.energy(pos)
                for ref in ffrefs:
                    FFs[istep] += ref.energy(pos)
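            # with do_valence, the residual valence energy is computed with this term
            # switched off (fc=0, rv0=0) so that only the remaining valence terms
            # contribute to RESs; the original parameters are restored afterwards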
            if do_valence:
                fc = self.valence.get_params(index, only='fc')
                rv = self.valence.get_params(index, only='rv')
                self.valence.set_params(index, fc=0.0)
                self.valence.set_params(index, rv0=0.0)
                for istep, pos in enumerate(trajectory.coords):
                    RESs[istep] += self.valence.calc_energy(pos) #- 0.5*fc*(qs[istep]-rv)**2
                self.valence.set_params(index, fc=fc)
                self.valence.set_params(index, rv0=rv)
            pars = fitpar(qs, AIs-FFs-RESs-min(AIs-FFs-RESs), rcond=-1)
            if energy_noise is None:
                if pars[0]!=0.0:
                    trajectory.fc = 2.0*pars[0]
                    trajectory.rv = -pars[1]/(2.0*pars[0])
                else:
                    trajectory.fc = 0.0
                    trajectory.rv = qs[len(qs)//2]
                    log.dump('force constant of %s is zero: rest value set to middle value' %basename)
            else:
                with log.section('PTEST', 4, timer='PT Estimate'):
                    log.dump('Performing noise analysis for trajectory of %s' %basename)
                    As = [pars[0]]
                    Bs = [pars[1]]
                    for i in range(Nerrorsteps):
                        pars = fitpar(qs, AIs-FFs-RESs-min(AIs-FFs-RESs)+np.random.normal(0.0, energy_noise, size=AIs.shape), rcond=-1)
                        As.append(pars[0])
                        Bs.append(pars[1])
                    if 0.0 in As:
                        log.dump('  force constant of zero detected, removing the relevant runs from analysis')
                    Bs = np.array([b for a,b in zip(As,Bs) if a!=0.0])
                    As = np.array([a for a in As if a!=0.0])
                    ks = As*2.0
                    q0s = -Bs/(2.0*As)
                    kunit = trajectory.term.units[0]
                    qunit = trajectory.term.units[1]
                    log.dump('    k  = %8.3f +- %6.3f (noisefree: %8.3f) %s' %(ks.mean()/parse_unit(kunit), ks.std()/parse_unit(kunit), ks[0]/parse_unit(kunit), kunit))
                    log.dump('    q0 = %8.3f +- %6.3f (noisefree: %8.3f) %s' %(q0s.mean()/parse_unit(qunit), q0s.std()/parse_unit(qunit), q0s[0]/parse_unit(qunit), qunit))
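                    # flag the estimate as too noisy when the relative spread on the
                    # rest value exceeds 1%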
                    if q0s.std()/q0s.mean()>0.01:
                        with log.section('PTEST', 3, timer='PT Estimate'):
                            fc, rv = self.valence.get_params(trajectory.term.index)
                            if rv is None:
                                log.dump('Noise on rest value of %s too high, using ab initio rest value' %basename)
                                pars = fitpar(qs, AIs-FFs-RESs-min(AIs-FFs-RESs)+np.random.normal(0.0, energy_noise, size=AIs.shape), rcond=-1)
                                if pars[0]!=0.0:
                                    trajectory.fc = 2.0*pars[0]
                                    trajectory.rv = -pars[1]/(2.0*pars[0])
                                else:
                                    trajectory.fc = 0.0
                                    trajectory.rv = qs[len(qs)//2]
                                    log.dump('AI force constant of %s is zero: rest value set to middle value' %basename)
                            else:
                                log.dump('Noise on rest value of %s too high, using previous value' %basename)
                                trajectory.fc = fc
                                trajectory.rv = rv
                    else:
                        trajectory.fc = ks.mean()
                        trajectory.rv = q0s.mean()
            #no negative rest values for all ics except dihedrals and bendcos
            if term.ics[0].kind not in [1,3,4,11]:
                if trajectory.rv<0:
                    trajectory.rv = 0.0
                    log.dump('rest value of %s was negative: set to zero' %basename)
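    # Hypothetical usage sketch (illustration only; the attributes trajectories,
    # ai and ffrefs on a program object are assumed from the surrounding listing):
    #
    #   for trajectory in program.trajectories:
    #       if trajectory is None: continue
    #       program.estimate(trajectory, program.ai, ffrefs=program.ffrefs, do_valence=True)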
Ejemplo n.º 46
0
 def run(self):
     with log.section('PROGRAM', 2):
         fn_traj = self.settings.fn_traj
         assert fn_traj is not None, 'It is useless to run the MakeTrajectories program without specifying a trajectory filename fn_traj!'
         assert not os.path.isfile(fn_traj), 'Given file %s to store trajectories to already exists!' %fn_traj
         self.do_pt_generate()
Ejemplo n.º 47
0
    def do_hc_estimatefc(self, tasks, logger_level=3, do_svd=False, svd_rcond=0.0, do_mass_weighting=True):
        '''
            Refine force constants using Hessian Cost function.

            **Arguments**

            tasks
                A list of strings identifying which terms should have their
                force constant estimated from the Hessian cost function. Using
                such flags, one can for example distinguish between force
                constant refinement of the diagonal terms (flag=HC_FC_DIAG) and
                force constant estimation of the cross terms (flag=HC_FC_CROSS).
                If the string 'all' is present in tasks, all force constants
                will be estimated.

            **Optional Arguments**

            logger_level
                print level at which the resulting parameters should be dumped to
                the logger. By default, the parameters will only be dumped at
                the highest log level.

            do_svd
                whether or not to perform a singular value decomposition before
                solving the set of equations, explicitly throwing out the
                degrees of freedom that correspond to the lowest singular
                values.

            do_mass_weighting
                whether or not to apply mass weighting to the ab initio Hessian
                and the force field contributions before doing the fitting.
        '''
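        # A minimal sketch (an assumption, not the actual HessianFCCost implementation)
        # of how an SVD-based least-squares solve can discard the directions with the
        # smallest singular values, with svd_rcond acting as a relative cutoff:
        #
        #   U, s, Vt = np.linalg.svd(A, full_matrices=False)
        #   mask = s > svd_rcond*s.max()
        #   x = Vt.T[:, mask] @ ((U.T[mask] @ b)/s[mask])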
        with log.section('HCEST', 2, timer='HC Estimate FC'):
            self.reset_system()
            log.dump('Estimating force constants from Hessian cost for tasks %s' %' '.join(tasks))
            term_indices = []
            for index in range(self.valence.vlist.nv):
                term = self.valence.terms[index]
                flagged = False
                for flag in tasks:
                    if flag in term.tasks:
                        flagged = True
                        break
                if flagged:
                    #first check if all rest values and multiplicities have been defined
                    if term.kind==0: self.valence.check_params(term, ['rv'])
                    if term.kind==1: self.valence.check_params(term, ['a0', 'a1', 'a2', 'a3'])
                    if term.kind==3: self.valence.check_params(term, ['rv0','rv1'])
                    if term.kind==4: self.valence.check_params(term, ['rv', 'm'])
                    if term.is_master():
                        term_indices.append(index)
                else:
                    #first check if all pars have been defined
                    if term.kind==0: self.valence.check_params(term, ['fc', 'rv'])
                    if term.kind==1: self.valence.check_params(term, ['a0', 'a1', 'a2', 'a3'])
                    if term.kind==3: self.valence.check_params(term, ['fc', 'rv0','rv1'])
                    if term.kind==4: self.valence.check_params(term, ['fc', 'rv', 'm'])
            if len(term_indices)==0:
                log.dump('No terms (with task in %s) found to estimate FC from HC' %(str(tasks)))
                return
            # Try to estimate force constants; if the remove_dysfunctional_cross
            # keyword is True, a loop is performed which checks whether there
            # are cross terms for which corresponding diagonal terms have zero
            # force constants. If this is the case, those cross terms are removed
            # from the fit and we try again until such cases no longer occur.
            max_iter = 100
            niter = 0
            while niter<max_iter:
                cost = HessianFCCost(self.system, self.ai, self.valence, term_indices, ffrefs=self.ffrefs, do_mass_weighting=do_mass_weighting)
                fcs = cost.estimate(do_svd=do_svd, svd_rcond=svd_rcond)
                # No need to continue if cross terms whose corresponding diagonal
                # terms have zero or negative force constants are allowed
                if self.settings.remove_dysfunctional_cross is False: break
                to_remove = []
                for index, fc in zip(term_indices, fcs):
                    term = self.valence.terms[index]
                    if term.basename.startswith('Cross'):
                        # Find force constants of corresponding diagonal terms
                        diag_fcs = np.zeros((2))
                        for idiag in range(2):
                            diag_index = term.diag_term_indexes[idiag]
                            if diag_index in term_indices:
                                fc_diag = fcs[term_indices.index(diag_index)]
                            else:
                                fc_diag = self.valence.get_params(diag_index, only='fc')
                            diag_fcs[idiag] = fc_diag
                        # If the force constant of any corresponding diagonal term is
                        # zero or negative, remove the cross term for the next iteration
                        if np.any(diag_fcs<=0.0):
                            to_remove.append(index)
                            self.valence.set_params(index, fc=0.0)
                            log.dump('WARNING! Dysfunctional cross term %s detected, removing from the hessian fit.'%term.basename)
                if len(to_remove)==0: break
                else:
                    for index in to_remove:
                        term_indices.remove(index)
                niter += 1
            assert niter<max_iter, "Could not remove all dysfunctional cross terms in %d iterations, something is seriously wrong"%max_iter
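            # copy the fitted force constant of each master term to its slave terms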
            for index, fc in zip(term_indices, fcs):
                master = self.valence.terms[index]
                assert master.is_master()
                self.valence.set_params(index, fc=fc)
                for islave in master.slaves:
                    self.valence.set_params(islave, fc=fc)
            self.valence.dump_logger(print_level=logger_level)