Example #1
 def npt_simulation(self, temperature, pressure, simnum):
     """ Submit a NPT simulation to the Work Queue. """
     wq = getWorkQueue()
     if not os.path.exists('npt_result.p'):
         link_dir_contents(os.path.join(self.root, self.rundir),
                           os.getcwd())
         self.last_traj += [
             os.path.join(os.getcwd(), i) for i in self.extra_output
             if '.gro' not in i
         ]
         prev_iter_ICs = os.path.join(os.getcwd(), "lipid.gro")
         if not os.path.exists(prev_iter_ICs):
             self.lipid_mol[simnum % len(self.lipid_mol)].write(
                 self.lipid_coords,
                 ftype='tinker' if self.engname == 'tinker' else None)
         cmdstr = '%s python npt_lipid.py %s %.3f %.3f' % (
             self.nptpfx, self.engname, temperature, pressure)
         if wq is None:
             logger.info("Running condensed phase simulation locally.\n")
             logger.info(
                 "You may tail -f %s/npt.out in another terminal window\n" %
                 os.getcwd())
             _exec(cmdstr, copy_stderr=True, outfnm='npt.out')
         else:
             queue_up(wq,
                      command=cmdstr + ' &> npt.out',
                      input_files=self.nptfiles + self.scripts +
                      ['forcebalance.p'],
                      output_files=['npt_result.p', 'npt.out'] +
                      self.extra_output,
                      tgt=self)
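For reference, here is a minimal standalone use of _exec with the same keyword arguments as the local branch above; the echo command is only a stand-in, and it is assumed that _exec still returns the captured output as a list of lines when outfnm is given.
from forcebalance.nifty import _exec

# 'echo' stands in for the real npt_lipid.py command line.
# outfnm writes the captured output to npt.out; copy_stderr merges stderr into it.
lines = _exec("echo hello", copy_stderr=True, outfnm="npt.out")
print(lines[0])  # expected to print "hello"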
Example #2
    def run_simulation(self, label, liq, AGrad=True):
        """ 
        Submit a simulation to the Work Queue or run it locally.

        Inputs:
        label = The name of the molecule (and hopefully the folder name that you're running in)
        liq = True/false flag indicating whether to run in liquid or gas phase
        """
        wq = getWorkQueue()

        # Create a dictionary of MD options that the script will read.
        md_opts = OrderedDict()
        md_opts['temperature'] = self.hfe_temperature
        md_opts['pressure'] = self.hfe_pressure
        md_opts['minimize'] = True
        if liq: 
            sdnm = 'liq'
            md_opts['nequil'] = self.liquid_eq_steps
            md_opts['nsteps'] = self.liquid_md_steps
            md_opts['timestep'] = self.liquid_timestep
            md_opts['sample'] = self.liquid_interval
        else: 
            sdnm = 'gas'
            md_opts['nequil'] = self.gas_eq_steps
            md_opts['nsteps'] = self.gas_md_steps
            md_opts['timestep'] = self.gas_timestep
            md_opts['sample'] = self.gas_interval

        eng_opts = deepcopy(self.engine_opts)
        # Enforce implicit solvent in the liquid simulation.
        # We need to be more careful with this when running explicit solvent. 
        eng_opts['implicit_solvent'] = liq
        eng_opts['coords'] = os.path.basename(self.molecules[label])

        if not os.path.exists(sdnm):
            os.makedirs(sdnm)
        os.chdir(sdnm)
        if not os.path.exists('md_result.p'):
            # Link in a bunch of files... what were these again?
            link_dir_contents(os.path.join(self.root,self.rundir),os.getcwd())
            # Link in the scripts required to run the simulation
            for f in self.scripts:
                LinkFile(os.path.join(os.path.split(__file__)[0],"data",f),os.path.join(os.getcwd(),f))
            # Link in the coordinate file.
            LinkFile(self.molecules[label], './%s' % os.path.basename(self.molecules[label]))
            # Store names of previous trajectory files.
            self.last_traj += [os.path.join(os.getcwd(), i) for i in self.extra_output]
            # Write target, engine and simulation options to disk.
            lp_dump((self.OptionDict, eng_opts, md_opts), 'simulation.p')
            # Execute the script for running molecular dynamics.
            cmdstr = '%s python md_ism_hfe.py %s' % (self.prefix, "-g" if AGrad else "")
            if wq is None:
                logger.info("Running condensed phase simulation locally.\n")
                logger.info("You may tail -f %s/npt.out in another terminal window\n" % os.getcwd())
                _exec(cmdstr, copy_stderr=True, outfnm='md.out')
            else:
                queue_up(wq, command = cmdstr+' &> md.out', tag='%s:%s/%s' % (self.name, label, "liq" if liq else "gas"),
                         input_files = self.scripts + ['simulation.p', 'forcefield.p', os.path.basename(self.molecules[label])],
                         output_files = ['md_result.p', 'md.out'] + self.extra_output, tgt=self, verbose=False, print_time=3600)
        os.chdir('..')
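A sketch of how a hydration free energy target might call this method for both phases of one molecule; the calling loop below is an assumption and does not appear in the example above.
# Hypothetical caller, e.g. inside submit_jobs(); self.molecules maps labels
# to coordinate files, as assumed by run_simulation() above.
for label in self.molecules:
    os.chdir(label)
    self.run_simulation(label, liq=True)    # solvated (implicit solvent) leg
    self.run_simulation(label, liq=False)   # gas phase leg
    os.chdir('..')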
Example #3
 def npt_simulation(self, temperature, pressure, simnum):
     """ Submit a NPT simulation to the Work Queue. """
     wq = getWorkQueue()
     if not (os.path.exists('npt_result.p') or os.path.exists('npt_result.p.bz2')):
         link_dir_contents(os.path.join(self.root,self.rundir),os.getcwd())
         if wq == None:
             print "Running condensed phase simulation locally."
             print "You may tail -f %s/npt.out in another terminal window" % os.getcwd()
             # if GoodStep() and (temperature, pressure) in self.DynDict_New:
             #     self.DynDict[(temperature, pressure)] = self.DynDict_New[(temperature, pressure)]
             # if (temperature, pressure) in self.DynDict:
             #     dynsrc = self.DynDict[(temperature, pressure)]
             #     dyndest = os.path.join(os.getcwd(), 'liquid.dyn')
             #     print "Copying .dyn file: %s to %s" % (dynsrc, dyndest)
             #     shutil.copy2(dynsrc,dyndest)
             cmdstr = 'python npt.py gromacs %i %.3f %.3f %.3f %.3f %s --liquid_equ_steps %i &> npt.out' % \
                 (self.liquid_prod_steps, self.liquid_timestep, self.liquid_interval, temperature, pressure, " --minimize_energy" if self.minimize_energy else "", self.liquid_equ_steps)
             _exec(cmdstr)
             # self.DynDict_New[(temperature, pressure)] = os.path.join(os.getcwd(),'liquid.dyn')
         else:
             # This part of the code has never been used before
             # Still need to figure out where to specify GROMACS location on each cluster
             # queue_up(wq,
             #          command = 'python npt.py liquid.xyz %.3f %.3f &> npt.out' % (temperature, pressure),
             #          input_files = ['liquid.xyz','liquid.key','mono.xyz','mono.key','forcebalance.p','npt.py'],
             #          output_files = ['npt_result.p.bz2', 'npt.py'] + self.FF.fnms,
             #          tgt=self)
             raise RuntimeError('Remote GROMACS execution is not yet enabled')
Example #4
 def driver(self, mvals, d):
     ## Create the force field file.
     pvals = self.FF.make(mvals)
     ## Actually run PSI4.
     odir = os.path.join(os.getcwd(),d)
     #if os.path.exists(odir):
     #    shutil.rmtree(odir)
     if not os.path.exists(odir): os.makedirs(odir)
     os.chdir(odir)
     o = wopen('objective.dat')
     for line in self.objfiles[d]:
         s = line.split()
         if len(s) > 2 and s[0] == 'path' and s[1] == '=':
             print >> o, "path = '%s'" % self.tdir
         elif len(s) > 2 and s[0] == 'set' and s[1] == 'objective_path':
             print >> o, "opath = '%s'" % os.getcwd()
             print >> o, "set objective_path $opath"
         else:
             print >> o, line,
     o.close()
     os.system("rm -f objective.out")
     _exec("psi4 objective.dat", print_command=False)
     answer = float(open('objective.out').readlines()[0].split()[1])*self.factor
     os.chdir('..')
     return answer
Example #5
    def run_simulation(self, label, liq, AGrad=True):
        """ 
        Submit a simulation to the Work Queue or run it locally.

        Inputs:
        label = The name of the molecule (and hopefully the folder name that you're running in)
        liq = True/false flag indicating whether to run in liquid or gas phase
        """
        wq = getWorkQueue()

        # Create a dictionary of MD options that the script will read.
        md_opts = OrderedDict()
        md_opts['temperature'] = self.hfe_temperature
        md_opts['pressure'] = self.hfe_pressure
        md_opts['minimize'] = True
        if liq: 
            sdnm = 'liq'
            md_opts['nequil'] = self.liquid_eq_steps
            md_opts['nsteps'] = self.liquid_md_steps
            md_opts['timestep'] = self.liquid_timestep
            md_opts['sample'] = self.liquid_interval
        else: 
            sdnm = 'gas'
            md_opts['nequil'] = self.gas_eq_steps
            md_opts['nsteps'] = self.gas_md_steps
            md_opts['timestep'] = self.gas_timestep
            md_opts['sample'] = self.gas_interval

        eng_opts = deepcopy(self.engine_opts)
        # Enforce implicit solvent in the liquid simulation.
        # We need to be more careful with this when running explicit solvent. 
        eng_opts['implicit_solvent'] = liq
        eng_opts['coords'] = os.path.basename(self.molecules[label])

        if not os.path.exists(sdnm):
            os.makedirs(sdnm)
        os.chdir(sdnm)
        if not os.path.exists('md_result.p'):
            # Link in a bunch of files... what were these again?
            link_dir_contents(os.path.join(self.root,self.rundir),os.getcwd())
            # Link in the scripts required to run the simulation
            for f in self.scripts:
                LinkFile(os.path.join(os.path.split(__file__)[0],"data",f),os.path.join(os.getcwd(),f))
            # Link in the coordinate file.
            LinkFile(self.molecules[label], './%s' % os.path.basename(self.molecules[label]))
            # Store names of previous trajectory files.
            self.last_traj += [os.path.join(os.getcwd(), i) for i in self.extra_output]
            # Write target, engine and simulation options to disk.
            lp_dump((self.OptionDict, eng_opts, md_opts), 'simulation.p')
            # Execute the script for running molecular dynamics.
            cmdstr = '%s python md_ism_hfe.py %s' % (self.prefix, "-g" if AGrad else "")
            if wq is None:
                logger.info("Running condensed phase simulation locally.\n")
                logger.info("You may tail -f %s/npt.out in another terminal window\n" % os.getcwd())
                _exec(cmdstr, copy_stderr=True, outfnm='md.out')
            else:
                queue_up(wq, command = cmdstr+' &> md.out', tag='%s:%s/%s' % (self.name, label, "liq" if liq else "gas"),
                         input_files = self.scripts + ['simulation.p', 'forcefield.p', os.path.basename(self.molecules[label])],
                         output_files = ['md_result.p', 'md.out'] + self.extra_output, tgt=self, verbose=False, print_time=3600)
        os.chdir('..')
Example #6
 def driver(self, mvals, d):
     ## Create the force field file.
     pvals = self.FF.make(mvals)
     ## Actually run PSI4.
     odir = os.path.join(os.getcwd(), d)
     #if os.path.exists(odir):
     #    shutil.rmtree(odir)
     if not os.path.exists(odir): os.makedirs(odir)
     os.chdir(odir)
     o = wopen('objective.dat')
     for line in self.objfiles[d]:
         s = line.split()
         if len(s) > 2 and s[0] == 'path' and s[1] == '=':
             print("path = '%s'" % self.tdir, file=o)
         elif len(s) > 2 and s[0] == 'set' and s[1] == 'objective_path':
             print("opath = '%s'" % os.getcwd(), file=o)
             print("set objective_path $opath", file=o)
         else:
             print(line, end=' ', file=o)
     o.close()
     os.system("rm -f objective.out")
     _exec("psi4 objective.dat", print_command=False)
     answer = float(
         open('objective.out').readlines()[0].split()[1]) * self.factor
     os.chdir('..')
     return answer
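A sketch of how this driver might be evaluated over every objective directory; the accumulation loop is an assumption, only driver() itself appears in the example.
# Hypothetical accumulation of the per-directory PSI4 objectives.
total = 0.0
for d in self.objfiles:
    total += self.driver(mvals, d)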
Example #7
    def submit_jobs(self, mvals, AGrad=True, AHess=True):
        """This routine is called by Objective.stage() and will run before "get".
        It submits the jobs and the stage() function will wait for jobs
        to complete.

        Parameters
        ----------
        mvals : list
            Mathematical parameter values.
        AGrad : Boolean
            Switch to turn on analytic gradient.
        AHess : Boolean
            Switch to turn on analytic Hessian.

        Returns
        -------
        Nothing.
        
        """
        # Set up and run the simulation chain on all points.
        for pt in self.points:
            # Create subdir
            try:
                os.makedirs(str(pt.idnr))
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    raise            
                
            # Goto subdir
            os.chdir(str(pt.idnr))

            # Link dir contents from target subdir to current temp directory.
            for f in self.scripts:
                LinkFile(os.path.join(self.root, self.tempdir, f),
                         os.path.join(os.getcwd(), f))
                
            link_dir_contents(os.path.join(self.root, self.tgtdir,
                                           str(pt.idnr)), os.getcwd())
            
            # Dump the force field to a pickle file
            with wopen('forcebalance.p') as f:
                lp_dump((self.FF, mvals, self.OptionDict, AGrad), f)
                
            # Run the simulation chain for point.        
            cmdstr = ("%s python md_chain.py " % self.mdpfx +
                      " ".join(self.quantities) + " " +
                      "--engine %s " % self.engname +
                      "--length %d " % self.n_sim_chain + 
                      "--name %s " % self.simpfx +
                      "--temperature %f " % pt.temperature +
                      "--pressure %f " % pt.pressure +
                      "--nequil %d " % self.eq_steps +
                      "--nsteps %d " % self.md_steps)
            _exec(cmdstr, copy_stderr=True, outfnm='md_chain.out')
        
            os.chdir('..')
Example #8
    def submit_jobs(self, mvals, AGrad=True, AHess=True):
        """This routine is called by Objective.stage() and will run before "get".
        It submits the jobs and the stage() function will wait for jobs
        to complete.

        Parameters
        ----------
        mvals : list
            Mathematical parameter values.
        AGrad : Boolean
            Switch to turn on analytic gradient.
        AHess : Boolean
            Switch to turn on analytic Hessian.

        Returns
        -------
        Nothing.
        
        """
        # Set up and run the simulation chain on all points.
        for pt in self.points:
            # Create subdir
            try:
                os.makedirs(str(pt.idnr))
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    raise            
                
            # Goto subdir
            os.chdir(str(pt.idnr))

            # Link dir contents from target subdir to current temp directory.
            for f in self.scripts:
                LinkFile(os.path.join(self.root, self.tempdir, f),
                         os.path.join(os.getcwd(), f))
                
            link_dir_contents(os.path.join(self.root, self.tgtdir,
                                           str(pt.idnr)), os.getcwd())
            
            # Dump the force field to a pickle file
            lp_dump((self.FF, mvals, self.OptionDict, AGrad), 'forcebalance.p')
                
            # Run the simulation chain for point.        
            cmdstr = ("%s python md_chain.py " % self.mdpfx +
                      " ".join(self.quantities) + " " +
                      "--engine %s " % self.engname +
                      "--length %d " % self.n_sim_chain + 
                      "--name %s " % self.simpfx +
                      "--temperature %f " % pt.temperature +
                      "--pressure %f " % pt.pressure +
                      "--nequil %d " % self.eq_steps +
                      "--nsteps %d " % self.md_steps)
            _exec(cmdstr, copy_stderr=True, outfnm='md_chain.out')
        
            os.chdir('..')
Example #9
 def write_nested_destroy(self, fnm, linedestroy):
     ln0 = range(len(open(fnm).readlines()))
     for layer in linedestroy:
         f = open(fnm).readlines()
         o = wopen('.tmp.gbs')
         newln = []
         for ln, line in enumerate(f):
             if ln not in layer:
                 print >> o, line,
                 newln.append(ln0[ln])
         ln0 = newln[:]
         _exec("mv .tmp.gbs %s" % fnm, print_command=False)
         o.close()
     return ln0
Example #10
 def write_nested_destroy(self, fnm, linedestroy):
     ln0 = list(range(len(open(fnm).readlines())))
     for layer in linedestroy:
         f = open(fnm).readlines()
         o = wopen('.tmp.gbs')
         newln = []
         for ln, line in enumerate(f):
             if ln not in layer:
                 print(line, end=' ', file=o)
                 newln.append(ln0[ln])
         ln0 = newln[:]
         _exec("mv .tmp.gbs %s" % fnm, print_command=False)
         o.close()
     return ln0
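A usage sketch: each entry of linedestroy is a layer of line indices to delete, applied successively, and the return value holds the original indices of the lines that survive. The file name and indices below are made up for illustration.
# Hypothetical call: first drop lines 3 and 4, then line 0 of what remains.
surviving = self.write_nested_destroy("basis.gbs", [[3, 4], [0]])
# 'surviving' lists the original line numbers still present in basis.gbs.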
Example #11
 def npt_simulation(self, temperature, pressure, simnum):
     """ Submit a NPT simulation to the Work Queue. """
     wq = getWorkQueue()
     if not os.path.exists('npt_result.p'):
         link_dir_contents(os.path.join(self.root,self.rundir),os.getcwd())
         self.last_traj += [os.path.join(os.getcwd(), i) for i in self.extra_output]
         self.lipid_mol[simnum%len(self.lipid_mol)].write(self.lipid_coords, ftype='tinker' if self.engname == 'tinker' else None)
         cmdstr = '%s python npt_lipid.py %s %.3f %.3f' % (self.nptpfx, self.engname, temperature, pressure)
         if wq is None:
             logger.info("Running condensed phase simulation locally.\n")
             logger.info("You may tail -f %s/npt.out in another terminal window\n" % os.getcwd())
             _exec(cmdstr, copy_stderr=True, outfnm='npt.out')
         else:
             queue_up(wq, command = cmdstr+' &> npt.out',
                      input_files = self.nptfiles + self.scripts + ['forcebalance.p'],
                      output_files = ['npt_result.p', 'npt.out'] + self.extra_output, tgt=self)
Example #12
    def minGrids(self):
        """
        For each unique residue, loop through the engine.mol object of each target and minimize all structures.
        Then gather all the files for each target into one file that is placed in the "Cluster" directory. 
        """
        printcool("Initial MM Minimization")

        cwd = os.getcwd()
        if not os.path.isdir("{}/Cluster".format(self.base_tmp)):
            os.makedirs("{}/Cluster".format(self.base_tmp))
        for k in self.unique_res:
            collect = None
            for i in range(len(self.unique_res[k])):
                os.chdir(self.unique_res[k][i].tempdir)
                scr = os.path.join(self.root, self.unique_res[k][i].tempdir)
                if not os.path.isdir("{}".format(self.opt_name)):
                    os.makedirs("{}".format(self.opt_name))
                os.chdir("{}".format(self.opt_name))
                for struct in range(len(self.unique_res[k][i].engine.mol)):
                    if not os.path.isdir("{}".format(struct)):
                        os.makedirs("{}".format(struct))
                    os.chdir("{}".format(struct))
                    engine_files = os.listdir("{}".format(scr))
                    [
                        os.symlink("{}/{}".format(scr, z), z)
                        for z in engine_files
                        if not os.path.isdir("{}/{}".format(scr, z))
                    ]
                    os.symlink(
                        os.path.join(self.root, self.options['ffdir'],
                                     self.options['forcefield'][0]),
                        "{}".format(self.options['forcefield'][0]))
                    energy, rmsd, geom = self.unique_res[k][i].engine.optimize(
                        struct)
                    geom.write("{}".format(self.min_file_name))
                    os.chdir(os.path.join(scr, self.opt_name))
                _exec('find . -name {} | sort | xargs cat > mm_opt.gro'.format(
                    self.min_file_name))
                mol = Molecule("mm_opt.gro")
                if collect is None: collect = mol
                else: collect.append(mol)
                os.chdir(cwd)
            os.chdir("{}/Cluster".format(self.base_tmp))
            if not os.path.isdir(k): os.makedirs(k)
            os.chdir(k)
            collect.write("mm_opt.gro")
            os.chdir(cwd)
Example #13
def callgmx(self, command, stdin=None, print_to_screen=False, print_command=False, **kwargs):
    """ Call GROMACS; prepend the gmxpath to the call to the GROMACS program. """
    ## Always, always remove backup files.
    rm_gmx_baks(os.getcwd())
    ## Call a GROMACS program as you would from the command line.
    csplit = command.split()
    prog = os.path.join(self.gmxpath, csplit[0])
    csplit[0] = prog + self.gmxsuffix
    return _exec(' '.join(csplit), stdin=stdin, print_to_screen=print_to_screen, print_command=print_command, **kwargs) 
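A usage sketch for callgmx, reusing the grompp/mdrun invocations that appear in the other GROMACS examples here; whether these exact flags apply depends on the GROMACS version.
# Hypothetical calls; gmxpath and gmxsuffix are prepended by callgmx itself.
self.callgmx("grompp -f shot.mdp")
self.callgmx("mdrun -nt 1 -o shot.trr -rerunvsite -rerun all.gro")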
Example #14
    def calltinker(self, command, stdin=None, print_to_screen=False, print_command=False, **kwargs):

        """ Call TINKER; prepend the tinkerpath to calling the TINKER program. """

        csplit = command.split()
        # Sometimes the engine changes dirs and the key goes missing, so we link it.
        if "%s.key" % self.name in csplit and not os.path.exists("%s.key" % self.name):
            LinkFile(self.abskey, "%s.key" % self.name)
        prog = os.path.join(self.tinkerpath, csplit[0])
        csplit[0] = prog
        o = _exec(
            " ".join(csplit),
            stdin=stdin,
            print_to_screen=print_to_screen,
            print_command=print_command,
            rbytes=1024,
            **kwargs
        )
        # Determine the TINKER version number.
        for line in o[:10]:
            if "Version" in line:
                vw = line.split()[2]
                if len(vw.split(".")) <= 2:
                    vn = float(vw)
                else:
                    vn = float(".".join(vw.split(".")[:2]))
                vn_need = 6.3
                try:
                    if vn < vn_need:
                        if self.warn_vn:
                            warn_press_key(
                                "ForceBalance requires TINKER %.1f - unexpected behavior with older versions!" % vn_need
                            )
                        self.warn_vn = True
                except:
                    logger.error("Unable to determine TINKER version number!\n")
                    raise RuntimeError
        for line in o[-10:]:
            # Catch exceptions since TINKER does not have exit status.
            if "TINKER is Unable to Continue" in line:
                for l in o:
                    logger.error("%s\n" % l)
                time.sleep(1)
                logger.error(
                    "TINKER may have crashed! (See above output)\nThe command was: %s\nThe directory was: %s\n"
                    % (" ".join(csplit), os.getcwd())
                )
                raise RuntimeError
        for line in o:
            if "D+" in line:
                logger.info(line + "\n")
                warn_press_key(
                    "TINKER returned a very large floating point number! (See above line; will give error on parse)"
                )
        return o
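A usage sketch for calltinker, modeled on the TINKER analyze call in the energy_driver example further below; the file names are placeholders.
# Hypothetical call: run TINKER 'analyze' on an archive and request energies via stdin.
o = self.calltinker("analyze liquid.arc -k liquid.key", stdin="E")
for line in o:
    if 'Total Potential Energy : ' in line:
        print(line.strip())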
Example #15
 def npt_simulation(self, temperature, pressure, simnum):
     """ Submit a NPT simulation to the Work Queue. """
     wq = getWorkQueue()
     if not (os.path.exists('npt_result.p') or os.path.exists('npt_result.p.bz2')):
         link_dir_contents(os.path.join(self.root,self.rundir),os.getcwd())
         if self.traj != None:
             self.conf_pdb.xyzs[0] = self.traj.xyzs[simnum%len(self.traj)]
             self.conf_pdb.boxes[0] = self.traj.boxes[simnum%len(self.traj)]
         self.conf_pdb.write('conf.pdb')
         if wq == None:
             print "Running condensed phase simulation locally."
             print "You may tail -f %s/npt.out in another terminal window" % os.getcwd()
             cmdstr = 'bash runcuda.sh python npt.py conf.pdb %s %i %.3f %.3f %.3f %.3f%s%s%s%s%s%s%s%s --liquid_equ_steps %i &> npt.out' % \
                 (self.FF.openmmxml, self.liquid_prod_steps, self.liquid_timestep, 
                  self.liquid_interval, temperature, pressure, 
                  " --force_cuda" if self.force_cuda else "", 
                  " --anisotropic" if self.anisotropic_box else "", 
                  " --mts_vvvr" if self.mts_vvvr else "", 
                  " --minimize_energy" if self.minimize_energy else "", 
                  " --gas_equ_steps %i" % self.gas_equ_steps if self.gas_equ_steps > 0 else "", 
                  " --gas_prod_steps %i" % self.gas_prod_steps if self.gas_prod_steps > 0 else "", 
                  " --gas_timestep %f" % self.gas_timestep if self.gas_timestep > 0.0 else "", 
                  " --gas_interval %f" % self.gas_interval if self.gas_interval > 0.0 else "", 
                  self.liquid_equ_steps)
             _exec(cmdstr)
         else:
             queue_up(wq,
                      command = 'bash runcuda.sh python npt.py conf.pdb %s %i %.3f %.3f %.3f %.3f%s%s%s%s%s%s%s%s --liquid_equ_steps %i &> npt.out' % \
                          (self.FF.openmmxml, self.liquid_prod_steps, self.liquid_timestep, 
                           self.liquid_interval, temperature, pressure, 
                           " --force_cuda" if self.force_cuda else "", 
                           " --anisotropic" if self.anisotropic_box else "", 
                           " --mts_vvvr" if self.mts_vvvr else "", 
                           " --minimize_energy" if self.minimize_energy else "", 
                           " --gas_equ_steps %i" % self.gas_equ_steps if self.gas_equ_steps > 0 else "", 
                           " --gas_prod_steps %i" % self.gas_prod_steps if self.gas_prod_steps > 0 else "", 
                           " --gas_timestep %f" % self.gas_timestep if self.gas_timestep > 0.0 else "", 
                           " --gas_interval %f" % self.gas_interval if self.gas_interval > 0.0 else "", 
                           self.liquid_equ_steps),
                      input_files = ['runcuda.sh', 'npt.py', 'conf.pdb', 'mono.pdb', 'forcebalance.p'],
                      #output_files = ['dynamics.dcd', 'npt_result.p', 'npt.out', self.FF.openmmxml])
                      output_files = ['npt_result.p.bz2', 'npt.out', self.FF.openmmxml],
                      tgt=self)
Example #16
 def npt_simulation(self, temperature, pressure, simnum):
     """ Submit a NPT simulation to the Work Queue. """
     wq = getWorkQueue()
     if not (os.path.exists('npt_result.p') or os.path.exists('npt_result.p.bz2')):
         link_dir_contents(os.path.join(self.root,self.rundir),os.getcwd())
         self.last_traj += [os.path.join(os.getcwd(), i) for i in self.extra_output]
         if self.liquid_traj != None:
             self.liquid_conf.xyzs[0] = self.liquid_traj.xyzs[simnum%len(self.liquid_traj)]
             self.liquid_conf.boxes[0] = self.liquid_traj.boxes[simnum%len(self.liquid_traj)]
         self.liquid_conf.write(self.liquid_fnm)
         cmdstr = '%s python npt.py %s %i %.3f %.3f %.3f %.3f %s' % (self.nptpfx, self.engine, self.liquid_prod_steps, self.liquid_timestep,
                                                                     self.liquid_interval, temperature, pressure, ' '.join([i for i in self.nptsfx if i != None]))
         if wq == None:
             logger.info("Running condensed phase simulation locally.\n")
             logger.info("You may tail -f %s/npt.out in another terminal window\n" % os.getcwd())
             _exec(cmdstr, outfnm='npt.out')
         else:
             queue_up(wq, command = cmdstr+' &> npt.out',
                      input_files = self.nptfiles + ['npt.py', self.liquid_fnm, self.gas_fnm, 'forcebalance.p'],
                      output_files = ['npt_result.p.bz2', 'npt.out'] + self.extra_output,
                      tgt=self)
Example #17
    def callamber(self, command, stdin=None, print_to_screen=False, print_command=False, **kwargs):

        """ Call TINKER; prepend the amberhome to calling the TINKER program. """

        csplit = command.split()
        # Sometimes the engine changes dirs and the inpcrd/prmtop go missing, so we link it.
        # Prepend the AMBER path to the program call.
        prog = os.path.join(self.amberhome, "bin", csplit[0])
        csplit[0] = prog
        # No need to catch exceptions since failed AMBER calculations will return nonzero exit status.
        o = _exec(' '.join(csplit), stdin=stdin, print_to_screen=print_to_screen, print_command=print_command, rbytes=1024, **kwargs)
        return o
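A usage sketch for callamber, reusing the tleap and sander commands that appear in the AMBER energy/force driver example below; the input files are placeholders.
# Hypothetical calls: build the run inputs with tleap, then run sander;
# a failed AMBER run is caught via its nonzero exit status.
self.callamber("tleap -f stage.leap")
self.callamber("sander -i force.mdin -o force.mdout -p prmtop -c inpcrd -y all.mdcrd -O")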
Example #18
 def driver(self):
     ## Actually run PSI4.
     if not in_fd() and CheckBasis():
         logger.info("Now checking for linear dependencies.\n")
         _exec("cp %s %s.bak" % (self.GBSfnm, self.GBSfnm), print_command=False)
         ln0 = self.write_nested_destroy(self.GBSfnm, self.FF.linedestroy_save)
         o = wopen(".lindep.dat")
         for line in open(self.DATfnm).readlines():
             s = line.split("#")[0].split()
             if len(s) == 3 and s[0].lower() == 'basis' and s[1].lower() == 'file':
                 print >> o, "basis file %s" % self.GBSfnm
             else:
                 print >> o, line,
         o.close()
         _exec("mv .lindep.dat %s" % self.DATfnm, print_command=False)
         _exec("psi4 %s" % self.DATfnm, print_command=False)
         LI = GBS_Reader()
         LI_lines = {}
         ## Read in the commented linindep.gbs file and ensure that these same lines are commented in the new .gbs file
         for line in open('linindep.gbs'):
             LI.feed(line,linindep=True)
             key = '.'.join([str(i) for i in (LI.element, LI.amom, LI.basis_number[LI.element], LI.contraction_number)])
             if LI.isdata:
                 if key in LI_lines:
                     logger.info("Duplicate key found:\n")
                     logger.info("%s\n" % key)
                     logger.info(str(LI_lines[key]))
                     logger.info(line)
                     warn_press_key("In %s, the LI_lines dictionary should not contain repeated keys!" % __file__)
                 LI_lines[key] = (line, LI.destroy)
Example #19
    def runTest(self):
        """Check utility functions in forcebalance.nifty"""

        ##variable manipulation functions
        self.assertTrue(isint("1"))
        self.assertFalse(isint("1."))
        self.assertTrue(isint("-4"))
        self.assertFalse(isint("-3.14"))

        self.assertTrue(isfloat("1.5"))
        self.assertTrue(isfloat("1"))
        self.assertFalse(isfloat("a"))

        self.assertTrue(isdecimal("1.0"))
        self.assertFalse(isdecimal("1"))

        for result in get_least_squares(([0]),[0]):
            self.assertFalse(result.any())

        ##least squares function tests
        #   trivial fully determined
        X=((1,3,-2),(3,5,6),(2,4,3))
        Y=(5,7,8)
        result = get_least_squares(X,Y)[0]
        self.assertAlmostEqual(result[0], -15)
        self.assertAlmostEqual(result[1], 8)
        self.assertAlmostEqual(result[2], 2)

        #   inconsistent system
        X=((1,),(1,))
        Y=(0,1)
        result = get_least_squares(X,Y)[0]
        self.assertAlmostEqual(result[0], .5)

        #   overdetermined system
        X=((2,0),(-1,1),(0,2))
        Y=(1,0,-1)
        result = get_least_squares(X,Y)[0]
        self.assertAlmostEqual(result[0], 1./3.)
        self.assertAlmostEqual(result[1], -1./3.)

        ##matrix manipulations
        X=flat(X)
        self.assertEqual(X.shape, (6,))
        X=row(X)
        self.assertEqual(X.shape, (1,6))
        X=col(X)
        self.assertEqual(X.shape, (6,1))

        ##_exec
        self.assertEqual(type(_exec("")),list)
        self.assertEqual(_exec("echo test")[0],"test")
        _exec("touch .test")
        self.assertTrue(os.path.isfile(".test"))
        _exec("rm .test")
        self.assertFalse(os.path.isfile(".test"))
        self.assertRaises(Exception, _exec, "exit 255")
Example #20
    def energy_force_driver_all_external_(self):
        ## Create the run input files (inpcrd, prmtop) from the force field file.  
        ## Note that the frcmod and mol2 files are required.
        ## This is like 'grompp' in GROMACS.
        _exec("tleap -f stage.leap", print_to_screen=False, print_command=False)
        ## This line actually runs AMBER.
        _exec("sander -i force.mdin -o force.mdout -p prmtop -c inpcrd -y all.mdcrd -O", print_to_screen=False, print_command=False)
        ## Simple parser for the forcedump.dat energy/force output.
        ParseMode = 0
        Energies = []
        Forces = []
        Force = []
        for line in open('forcedump.dat'):
            line = line.strip()
            sline = line.split()
            if ParseMode == 1:
                if len(sline) == 1 and isfloat(sline[0]):
                    Energies.append(float(sline[0]) * 4.184)
                    ParseMode = 0
            if ParseMode == 2:
                if len(sline) == 3 and all(isfloat(sline[i]) for i in range(3)):
                    Force += [float(sline[i]) * 4.184 * 10 for i in range(3)]
                if len(Force) == 3*self.qmatoms:
                    Forces.append(np.array(Force))
                    Force = []
                    ParseMode = 0
            if line == '0 START of Energies':
                ParseMode = 1
            elif line == '1 Total Force':
                ParseMode = 2

        Energies = np.array(Energies[1:])
        Forces = np.array(Forces[1:])
        
        M = np.hstack((Energies.reshape(-1,1), Forces))

        return M
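Because of the final hstack, the returned array holds the snapshot energy in column 0 and the flattened forces in the remaining columns; a small sketch of splitting it back apart (the caller itself is assumed).
# Hypothetical unpacking of the returned array M (one row per snapshot).
M = self.energy_force_driver_all_external_()
E = M[:, 0]    # potential energies, kJ/mol
F = M[:, 1:]   # forces, kJ/mol/nm, flattened as (x1, y1, z1, x2, ...)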
Example #21
 def npt_simulation(self, temperature, pressure, simnum):
     """ Submit a NPT simulation to the Work Queue. """
     wq = getWorkQueue()
     if not os.path.exists("npt_result.p"):
         link_dir_contents(os.path.join(self.root, self.rundir), os.getcwd())
         self.last_traj += [os.path.join(os.getcwd(), i) for i in self.extra_output if ".gro" not in i]
         prev_iter_ICs = os.path.join(os.getcwd(), "lipid.gro")
         if not os.path.exists(prev_iter_ICs):
             self.lipid_mol[simnum % len(self.lipid_mol)].write(
                 self.lipid_coords, ftype="tinker" if self.engname == "tinker" else None
             )
         cmdstr = "%s python npt_lipid.py %s %.3f %.3f" % (self.nptpfx, self.engname, temperature, pressure)
         if wq is None:
             logger.info("Running condensed phase simulation locally.\n")
             logger.info("You may tail -f %s/npt.out in another terminal window\n" % os.getcwd())
             _exec(cmdstr, copy_stderr=True, outfnm="npt.out")
         else:
             queue_up(
                 wq,
                 command=cmdstr + " &> npt.out",
                 input_files=self.nptfiles + self.scripts + ["forcebalance.p"],
                 output_files=["npt_result.p", "npt.out"] + self.extra_output,
                 tgt=self,
             )
Example #22
 def generate_vsite_positions(self):
     """ Call mdrun in order to update the virtual site positions. """
     # Remove backup files.
     rm_gmx_baks(os.getcwd())
     # Call grompp followed by mdrun.
     _exec(["./grompp", "-f", "shot.mdp"], print_command=False)
     _exec(["./mdrun", "-nt", "1", "-o", "shot.trr", "-rerunvsite", "-rerun", "all.gro"], print_command=False)
     # Gather information
     _exec(["./trjconv","-f","shot.trr","-o","trajout.gro","-ndec","6","-novel","-noforce"], stdin='System', print_command=False)
     NewMol = Molecule("trajout.gro")
     self.traj.xyzs = NewMol.xyzs
Example #23
    def calltinker(self, command, stdin=None, print_to_screen=False, print_command=False, **kwargs):

        """ Call TINKER; prepend the tinkerpath to calling the TINKER program. """

        csplit = command.split()
        # Sometimes the engine changes dirs and the key goes missing, so we link it.
        if "%s.key" % self.name in csplit and not os.path.exists("%s.key" % self.name):
            LinkFile(self.abskey, "%s.key" % self.name)
        prog = os.path.join(self.tinkerpath, csplit[0])
        csplit[0] = prog
        o = _exec(' '.join(csplit), stdin=stdin, print_to_screen=print_to_screen, print_command=print_command, **kwargs)
        for line in o[-10:]:
            # Catch exceptions since TINKER does not have exit status.
            if "TINKER is Unable to Continue" in line:
                for l in o:
                    logger.info("%s\n" % l)
                warn_press_key("TINKER may have crashed! (See above output)")
                break
        return o
Example #24
 def energy_force_driver_all(self):
     """ Computes the energy and force using GROMACS for a trajectory.  This does not require GROMACS-X2. """
     # Remove backup files.
     rm_gmx_baks(os.getcwd())
     # Call grompp followed by mdrun.
     _exec(["./grompp", "-f", "shot.mdp"], print_command=False)
     _exec(["./mdrun", "-nt", "1", "-o", "shot.trr", "-rerunvsite", "-rerun", "all.gro"], print_command=False)
     # Gather information
     _exec(["./g_energy","-xvg","no"], stdin='Potential', print_command=False)
     _exec(["./g_traj","-xvg","no","-f","shot.trr","-of","force.xvg","-fp"], stdin='System', print_command=False)
     M = []
     Efile = open("energy.xvg").readlines()
     Ffile = open("force.xvg").readlines()
     # Loop through the snapshots
     for Eline, Fline in zip(Efile, Ffile):
         # Compute the potential energy and append to list
         Energy = [float(Eline.split()[1])]
         # When we read in the force, make sure that we only read in the forces on real atoms.
         Force = [float(j) for i, j in enumerate(Fline.split()[1:]) if self.AtomMask[i/3]]
         M.append(array(Energy + Force)[:3*self.fitatoms+1])
     return array(M)
Example #25
def energy_driver(mvals,FF,xyz,tky,verbose=False,dipole=False):
    """
    Compute a set of snapshot energies (and optionally, dipoles) as a function of the force field parameters.

    ForceBalance creates the force field, TINKER reads it in, and we loop through the snapshots
    to compute the energies.

    @param[in] mvals Mathematical parameter values
    @param[in] FF ForceBalance force field object
    @return E A numpy array of energies in kilojoules per mole

    """
    # Part of the command line argument to TINKER.
    basename = xyz[:-4]
    xin = "%s" % xyz + ("" if tky == None else " -k %s" % tky)
    xain = "%s.arc" % basename + ("" if tky == None else " -k %s" % tky)
    
    # Print the force field file from the ForceBalance object, with modified parameters.
    FF.make(mvals)
    
    # Execute TINKER.
    cmdstr = "./analyze %s" % xain
    oanl = _exec(cmdstr,stdin="E",print_command=verbose,print_to_screen=verbose)

    # Read potential energy from file.
    E = []
    for line in oanl:
        if 'Total Potential Energy : ' in line:
            E.append(float(line.split()[4]))
    E = np.array(E) * 4.184
    if dipole:
        # If desired, read dipole from file.
        D = []
        for line in oanl:
            if 'Dipole X,Y,Z-Components :' in line:
                D.append([float(line.split()[i]) for i in range(-3,0)])
        D = np.array(D)
        # Return a Nx4 array with energies in the first column and dipole in columns 2-4.
        answer = np.hstack((E.reshape(-1,1), D.reshape(-1,3)))
        return answer
    else:
        return E
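A sketch of calling energy_driver with dipoles enabled and splitting the Nx4 result described above; the coordinate and key file names are placeholders.
# Hypothetical call; FF is a ForceBalance force field object, and the TINKER
# coordinate/key files are assumed to be present in the working directory.
ED = energy_driver(mvals, FF, "liquid.xyz", "liquid.key", dipole=True)
E = ED[:, 0]     # snapshot energies, kJ/mol
D = ED[:, 1:4]   # dipole components, Debye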
Example #26
    def callamber(self,
                  command,
                  stdin=None,
                  print_to_screen=False,
                  print_command=False,
                  **kwargs):
        """ Call TINKER; prepend the amberhome to calling the TINKER program. """

        csplit = command.split()
        # Sometimes the engine changes dirs and the inpcrd/prmtop go missing, so we link it.
        # Prepend the AMBER path to the program call.
        prog = os.path.join(self.amberhome, "bin", csplit[0])
        csplit[0] = prog
        # No need to catch exceptions since failed AMBER calculations will return nonzero exit status.
        o = _exec(' '.join(csplit),
                  stdin=stdin,
                  print_to_screen=print_to_screen,
                  print_command=print_command,
                  rbytes=1024,
                  **kwargs)
        return o
Example #27
#!/usr/bin/env python

from forcebalance.molecule import Molecule
from forcebalance.nifty import _exec
import os

np = ""
if "CORES_PER_WORKER" in os.environ and int(
        os.environ["CORES_PER_WORKER"]) > 1:
    np = " -np %i" % int(os.environ["CORES_PER_WORKER"])

_exec("touch opt.xyz")
_exec("touch energy.txt")
_exec("rm -f qchem.out.prev")
_exec("touch qchem.out.prev")
qcin = Molecule("qchem.in", ftype="qcin")
qcin.edit_qcrems({'geom_opt_max_cycles': '100'})
qcin.write("qchem.in")
_exec("qchem42 %s qchem.in qchem.out &> qchem.err" % np)


def special_criterion():
    mk = 0
    mx = 0
    Cnvgd = {}
    for ln, line in enumerate(open("qchem.out").readlines()):
        if "Maximum optimization cycles reached" in line:
            mx = 1
        if "Maximum     Tolerance    Cnvgd?" in line:
            mk = ln
        if mk > 0 and ln > mk and ln <= mk + 3:
Example #28
    def test_nifty_functions(self):
        """Check utility functions in forcebalance.nifty"""

        ##variable manipulation functions
        self.logger.debug("Checking nifty.isint()\n")
        assert isint("1")
        assert not (isint("1."))
        assert isint("-4")
        assert not (isint("-3.14"))

        self.logger.debug("Checking nifty.isfloat()\n")
        assert isfloat("1.5")
        assert isfloat("1")
        assert not (isfloat("a"))

        self.logger.debug("Checking nifty.isdecimal()\n")
        assert isdecimal("1.0")
        assert not (isdecimal("1"))

        for result in get_least_squares(([0]), [0]):
            assert not (result.any())

        self.logger.debug(
            "Verifying nifty.get_least_squares() results for some trivial cases\n"
        )
        ##least squares function tests
        #   trivial fully determined
        X = ((1, 3, -2), (3, 5, 6), (2, 4, 3))
        Y = (5, 7, 8)
        result = get_least_squares(X, Y)[0]
        np.testing.assert_almost_equal(result[0], -15)
        np.testing.assert_almost_equal(result[1], 8)
        np.testing.assert_almost_equal(result[2], 2)

        #   inconsistent system
        X = ((1, ), (1, ))
        Y = (0, 1)
        result = get_least_squares(X, Y)[0]
        np.testing.assert_almost_equal(result[0], .5)

        #   overdetermined system
        X = ((2, 0), (-1, 1), (0, 2))
        Y = (1, 0, -1)
        result = get_least_squares(X, Y)[0]
        np.testing.assert_almost_equal(result[0], 1. / 3.)
        np.testing.assert_almost_equal(result[1], -1. / 3.)

        self.logger.debug(
            "Verify nifty matrix manipulations perform as expected\n")
        ##matrix manipulations
        X = flat(X)
        assert X.shape == (6, )
        X = row(X)
        assert X.shape == (1, 6)
        X = col(X)
        assert X.shape == (6, 1)

        self.logger.debug("Running some test processes using nifty._exec()\n")
        ##_exec
        assert type(_exec("")) is list
        assert _exec("echo test")[0] == "test"
        _exec("touch .test")
        assert os.path.isfile(".test")
        _exec("rm .test")
        assert not (os.path.isfile(".test"))
        with pytest.raises(Exception) as excinfo:
            _exec("exit 255")
Example #29
    def __init__(self, options, tgt_opts, forcefield):
        """
        All options here are intended to be usable by every
        conceivable type of target (in other words, only
        add content here if it's widely applicable.)

        If we want to add attributes that are more specific
        (i.e. a set of reference forces for force matching), they
        are added in the subclass AbInitio that inherits from
        Target.

        """
        super(Target, self).__init__(options)
        #======================================#
        # Options that are given by the parser #
        #======================================#
        ## Root directory of the whole project
        self.set_option(options, 'root')
        ## Name of the target
        self.set_option(tgt_opts, 'name')
        if self.name in ["forcefield-remote"]:
            logger.error(
                "forcefield-remote is not an allowed target name (reserved)")
            raise RuntimeError
        ## Type of target
        self.set_option(tgt_opts, 'type')
        ## Relative weight of the target
        self.set_option(tgt_opts, 'weight')
        ## Switch for finite difference gradients
        self.set_option(tgt_opts, 'fdgrad')
        ## Switch for finite difference Hessians
        self.set_option(tgt_opts, 'fdhess')
        ## Switch for FD gradients + Hessian diagonals
        self.set_option(tgt_opts, 'fdhessdiag')
        ## How many seconds to sleep (if any)
        self.set_option(tgt_opts, 'sleepy')
        ## Parameter types that trigger FD gradient elements
        self.set_option(None,
                        None,
                        'fd1_pids', [i.upper() for i in tgt_opts['fd_ptypes']],
                        default=[])
        self.set_option(None,
                        None,
                        'fd2_pids', [i.upper() for i in tgt_opts['fd_ptypes']],
                        default=[])
        ## Parameter types that trigger FD Hessian elements
        ## Finite difference step size
        self.set_option(options, 'finite_difference_h', 'h')
        ## Whether to make backup files
        self.set_option(options, 'backup')
        ## Directory to read data from.
        self.set_option(tgt_opts, 'read', 'rd')
        if self.rd is not None: self.rd = self.rd.strip("/")
        ## Iteration where we turn on zero-gradient skipping.
        self.set_option(options, 'zerograd')
        ## Gradient norm below which we skip.
        self.set_option(tgt_opts, 'epsgrad')
        ## Dictionary of whether to call the derivatives.
        self.pgrad = list(range(forcefield.np))
        self.OptionDict['pgrad'] = self.pgrad

        #======================================#
        #     Variables which are set here     #
        #======================================#
        ## Relative directory of target
        if os.path.exists('targets'):
            tgtdir = 'targets'
        elif os.path.exists('simulations'):
            tgtdir = 'simulations'
        elif os.path.exists('targets.tar.bz2'):
            logger.info("Extracting targets folder from archive.\n")
            _exec("tar xvjf targets.tar.bz2")
            tgtdir = 'targets'
        elif os.path.exists('targets.tar.gz'):
            logger.info("Extracting targets folder from archive.\n")
            _exec("tar xvzf targets.tar.gz")
            tgtdir = 'targets'
        else:
            logger.error(
                '\x1b[91mThe targets directory is missing!\x1b[0m\nDid you finish setting up the target data?\nPlace the data in a directory called "targets" or "simulations"\n'
            )
            raise RuntimeError
        self.set_option(None, None, 'tgtdir', os.path.join(tgtdir, self.name))
        ## Temporary (working) directory; it is temp/(target_name)
        ## Used for storing temporary variables that don't change through the course of the optimization
        if 'input_file' in options and options['input_file'] is not None:
            self.tempbase = os.path.splitext(options['input_file'])[0] + '.tmp'
        else:
            self.tempbase = "temp"
        self.tempdir = os.path.join(self.tempbase, self.name)
        ## self.tempdir     = os.path.join('temp',self.name)
        ## The directory in which the simulation is running - this can be updated.
        self.rundir = self.tempdir
        ## Need the forcefield (here for now)
        self.FF = forcefield
        ## mol2 files that are stored in the forcefield folder
        ## need to be included in the list of mol2 files for the target
        if hasattr(self, 'mol2'):
            for fnm in self.FF.fnms:
                if fnm.endswith('.mol2'):
                    self.mol2.append(fnm)

        ## Counts how often the objective function was computed
        self.xct = 0
        ## Counts how often the gradient was computed
        self.gct = 0
        ## Counts how often the Hessian was computed
        self.hct = 0
        ## Whether to read indicate.log from file when restarting an aborted run.
        self.read_indicate = True
        ## Whether to write indicate.log at every iteration (true for all but remote.)
        self.write_indicate = True
        ## Whether to read objective.p from file when restarting an aborted run.
        self.read_objective = True
        ## Whether to write objective.p at every iteration (true for all but remote.)
        self.write_objective = True
        ## Create a new temp directory.
        if not options['continue']:
            self.refresh_temp_directory()
        else:
            if not os.path.exists(os.path.join(self.root, self.tempdir)):
                os.makedirs(os.path.join(self.root, self.tempdir))
        ## This flag specifies whether the target has been evaluated yet.
        self.evaluated = False
        ## This flag specifies whether the previous optimization step was good.
        self.goodstep = False
Example #30
def drive_msms(xyz, radii, density):
    with open(os.path.join('msms_input.p'),'w') as f: lp_dump((xyz, radii, density),f)
    _exec("CallMSMS.py", print_to_screen=False, print_command=False)
    return lp_load(open('msms_output.p'))
Example #31
def get_monomer_properties(print_stuff=0):
    # Multiply a quantity in nm to convert to a0
    nm_to_a0 = 1./0.05291772108
    # One Debye equals this many e*a0; divide a quantity in e*a0 by this to get Debye
    ea0_to_debye = 0.393430307
    os.system("rm -rf *.log \#*")
    _exec(["./grompp"], print_command=False)
    _exec(["./mdrun"], outfnm="mdrun.txt", print_command=False)
    _exec("./trjconv -f traj.trr -o confout.gro -ndec 6".split(), stdin="0\n", print_command=False)
    x = []
    q = []
    for line in open("confout.gro").readlines():
        sline = line.split()
        if len(sline) >= 6 and isfloat(sline[3]) and isfloat(sline[4]) and isfloat(sline[5]):
            x.append([float(i) for i in sline[3:6]])
    for line in open("charges.log").readlines():
        sline = line.split()
        if 'AtomNr' in line:
            q.append(float(sline[5]))
    mode = 0
    a = []
    for line in open("mdrun.txt").readlines():
        if mode == 1:
            sline = line.split()
            if len(sline) == 3:
                if isfloat(sline[0]) and isfloat(sline[1]) and isfloat(sline[2]):
                    a.append([float(i) for i in sline])
                elif any(["nan" in s for s in sline[:3]]):
                    a.append([1e10,1e10,1e10])
        if "Computing the polarizability tensor" in line:
            mode = 1
    x = Np.array(x)
    q = Np.array(q)
    a = Np.array(a)
    Dip = Np.zeros(3,dtype=float)
    QuadXX = 0.0
    QuadYY = 0.0
    QuadZZ = 0.0
    OctXXZ = 0.0
    OctYYZ = 0.0
    OctZZZ = 0.0
    for i in range(q.shape[0]):
        Dip += x[i]*q[i]*nm_to_a0/ea0_to_debye
        xx = x[i,0]*x[i,0]
        yy = x[i,1]*x[i,1]
        zz = x[i,2]*x[i,2]
        z  = x[i,2]
        r2 = Np.dot(x[i,:],x[i,:])
        QuadXX += 0.5*q[i]*(2*xx - yy - zz) * 10 * nm_to_a0 / ea0_to_debye
        QuadYY += 0.5*q[i]*(2*yy - xx - zz) * 10 * nm_to_a0 / ea0_to_debye
        QuadZZ += 0.5*q[i]*(2*zz - xx - yy) * 10 * nm_to_a0 / ea0_to_debye
        OctXXZ += 0.5*q[i]*z*(5*xx-r2) * 100 * nm_to_a0 / ea0_to_debye
        OctYYZ += 0.5*q[i]*z*(5*yy-r2) * 100 * nm_to_a0 / ea0_to_debye
        OctZZZ += 0.5*q[i]*z*(5*zz-3*r2) * 100 * nm_to_a0 / ea0_to_debye
    DipZ = Dip[2]
    AlphaXX = a[0,0]
    AlphaYY = a[1,1]
    AlphaZZ = a[2,2]
    # Quantities taken from Niu (2001) and Berne (1994)
    DipZ0 = 1.855
    QuadXX0 =  2.51
    QuadYY0 = -2.63
    QuadZZ0 =  0.11
    Quad0   = Np.sqrt((QuadXX0**2 + QuadYY0**2 + QuadZZ0**2)/3)
    OctXXZ0 =  2.58
    OctYYZ0 = -1.24
    OctZZZ0 = -1.35
    Oct0   = Np.sqrt((OctXXZ0**2 + OctYYZ0**2 + OctZZZ0**2)/3)
    AlphaXX0 = 10.32
    AlphaYY0 =  9.56
    AlphaZZ0 =  9.91
    Alpha0   = Np.sqrt((AlphaXX0**2 + AlphaYY0**2 + AlphaZZ0**2)/3)
    Err_DipZ = ((DipZ-DipZ0)/DipZ0)**2
    Err_QuadXX = ((QuadXX-QuadXX0)/Quad0)**2
    Err_QuadYY = ((QuadYY-QuadYY0)/Quad0)**2
    Err_QuadZZ = ((QuadZZ-QuadZZ0)/Quad0)**2
    Err_OctXXZ = ((OctXXZ-OctXXZ0)/Oct0)**2
    Err_OctYYZ = ((OctYYZ-OctYYZ0)/Oct0)**2
    Err_OctZZZ = ((OctZZZ-OctZZZ0)/Oct0)**2
    Err_AlphaXX = ((AlphaXX-AlphaXX0)/Alpha0)**2
    Err_AlphaYY = ((AlphaYY-AlphaYY0)/Alpha0)**2
    Err_AlphaZZ = ((AlphaZZ-AlphaZZ0)/Alpha0)**2
    Objective   = Err_DipZ + (Err_QuadXX + Err_QuadYY + Err_QuadZZ)/3 + (Err_AlphaXX + Err_AlphaYY + Err_AlphaZZ)/3
    if print_stuff:
        #print "\rvalues (errors): mu_z = % .3f (%.3f) q_xx = % .3f (%.3f) q_yy = % .3f (%.3f) q_zz = % .3f (%.3f) o_xxz = % .3f (%.3f) o_yyz = % .3f (%.3f) o_zzz = % .3f (%.3f) a_xx = % .3f (%.3f) a_yy = % .3f (%.3f) a_zz = % .3f (%.3f)" % (DipZ,Err_DipZ,QuadXX,Err_QuadXX,QuadYY,Err_QuadYY,QuadZZ,Err_QuadZZ,OctXXZ,Err_OctXXZ,OctYYZ,Err_OctYYZ,OctZZZ,Err_OctZZZ,AlphaXX,Err_AlphaXX,AlphaYY,Err_AlphaYY,AlphaZZ,Err_AlphaZZ)
        logger.info("\rvalues (errors): mu_z = % .3f (%.3f) q = % .3f % .3f % .3f (% .3f % .3f % .3f) o = % .3f % .3f % .3f (% .3f % .3f % .3f) a = %.3f %.3f %.3f (%.3f %.3f %.3f) x2 = % .4f\n" % (DipZ,Err_DipZ,QuadXX,QuadYY,QuadZZ,Err_QuadXX,Err_QuadYY,Err_QuadZZ,OctXXZ,OctYYZ,OctZZZ,Err_OctXXZ,Err_OctYYZ,Err_OctZZZ,AlphaXX,AlphaYY,AlphaZZ,Err_AlphaXX,Err_AlphaYY,Err_AlphaZZ,Objective))
    #Objective   = Err_DipZ + (Err_QuadXX + Err_QuadYY + Err_QuadZZ)/3 + (Err_OctXXZ + Err_OctYYZ + Err_OctZZZ)/3 + (Err_AlphaXX + Err_AlphaYY + Err_AlphaZZ)/3
    Properties = OrderedDict()
    Properties['DipZ'] = DipZ
    Properties['QuadXX'] = QuadXX
    Properties['QuadYY'] = QuadYY
    Properties['QuadZZ'] = QuadZZ
    Properties['OctXXZ'] = OctXXZ
    Properties['OctYYZ'] = OctYYZ
    Properties['OctZZZ'] = OctZZZ
    Properties['AlphaXX'] = AlphaXX
    Properties['AlphaYY'] = AlphaYY
    Properties['AlphaZZ'] = AlphaZZ
    return Properties
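A usage sketch for the property calculation above; it assumes the GROMACS helper programs referenced in the function are present in the working directory.
# Hypothetical call: run the monomer calculation and inspect a few properties.
props = get_monomer_properties(print_stuff=1)
print("Dipole mu_z = %.3f D" % props['DipZ'])
print("Mean polarizability = %.3f" % ((props['AlphaXX'] + props['AlphaYY'] + props['AlphaZZ']) / 3))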
Example #32
#!/usr/bin/env python
from forcebalance.molecule import *
from forcebalance.nifty import _exec

#Run calculation
_exec("psi4 -n 8 eth.psi4in eth.psi4out")

#Get output and write qdata.txt file
mol_out = Molecule("eth.psi4out")
mol_out.write("qdata.txt", ftype="qdata")

Example #33
    def test_nifty_functions(self):
        """Check utility functions in forcebalance.nifty"""

        ##variable manipulation functions
        self.logger.debug("Checking nifty.isint()\n")
        self.assertTrue(isint("1"))
        self.assertFalse(isint("1."))
        self.assertTrue(isint("-4"))
        self.assertFalse(isint("-3.14"))

        self.logger.debug("Checking nifty.isfloat()\n")
        self.assertTrue(isfloat("1.5"))
        self.assertTrue(isfloat("1"))
        self.assertFalse(isfloat("a"))

        self.logger.debug("Checking nifty.isdecimal()\n")
        self.assertTrue(isdecimal("1.0"))
        self.assertFalse(isdecimal("1"))

        for result in get_least_squares(([0]),[0]):
            self.assertFalse(result.any())

        self.logger.debug("Verifying nifty.get_least_squares() results for some trivial cases\n")
        ##least squares function tests
        #   trivial fully determined
        X=((1,3,-2),(3,5,6),(2,4,3))
        Y=(5,7,8)
        result = get_least_squares(X,Y)[0]
        self.assertAlmostEqual(result[0], -15)
        self.assertAlmostEqual(result[1], 8)
        self.assertAlmostEqual(result[2], 2)

        #   inconsistent system
        X=((1,),(1,))
        Y=(0,1)
        result = get_least_squares(X,Y)[0]
        self.assertAlmostEqual(result[0], .5)

        #   overdetermined system
        X=((2,0),(-1,1),(0,2))
        Y=(1,0,-1)
        result = get_least_squares(X,Y)[0]
        self.assertAlmostEqual(result[0], 1./3.)
        self.assertAlmostEqual(result[1], -1./3.)

        self.logger.debug("Verify nifty matrix manipulations perform as expected\n")
        ##matrix manipulations
        X=flat(X)
        self.assertEqual(X.shape, (6,))
        X=row(X)
        self.assertEqual(X.shape, (1,6))
        X=col(X)
        self.assertEqual(X.shape, (6,1))

        self.logger.debug("Running some test processes using nifty._exec()\n")
        ##_exec
        self.assertEqual(type(_exec("")),list)
        self.assertEqual(_exec("echo test")[0],"test")
        _exec("touch .test")
        self.assertTrue(os.path.isfile(".test"))
        _exec("rm .test")
        self.assertFalse(os.path.isfile(".test"))
        self.assertRaises(Exception, _exec, "exit 255")
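
The least-squares assertions above can be reproduced with plain numpy; a minimal sketch of the fully determined 3x3 case (not ForceBalance code) is shown below and should recover the coefficients (-15, 8, 2) that the test expects.

import numpy as np

X = np.array([[1, 3, -2], [3, 5, 6], [2, 4, 3]], dtype=float)
Y = np.array([5, 7, 8], dtype=float)
beta = np.linalg.lstsq(X, Y, rcond=None)[0]
print(beta)  # approximately [-15.  8.  2.]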
Example #34
0
def run_simulation(xyz, tky, tstep, nstep, neq, npr, pbc=True, verbose=False):
    """ Run a NPT simulation and gather statistics. """

    basename = xyz[:-4]
    xin = "%s" % xyz + ("" if tky == None else " -k %s" % tky)
    xain = "%s.arc" % basename + ("" if tky == None else " -k %s" % tky)
    
    if args.minimize_energy:
        cmdstr = "./minimize %s 1.0e-1" % xin
        _exec(cmdstr,print_command=verbose,print_to_screen=verbose)
        _exec("mv %s_2 %s" % (xyz,xyz),print_command=verbose,print_to_screen=verbose)
    print "Running equilibration"
    # Run the equilibration.
    if pbc:
        cmdstr = "./dynamic %s %i %f %f 4 %f %f" % (xin, nstep*neq, tstep, float(nstep*tstep)/1000, temperature, pressure)
    else:
        cmdstr = "./dynamic %s %i %f %f 2 %f" % (xin, nstep*neq, tstep, float(nstep*tstep)/1000, temperature)
    _exec(cmdstr,print_command=verbose,print_to_screen=verbose)
    _exec("rm -f %s.arc %s.box" % (basename, basename),print_command=verbose,print_to_screen=verbose)
    # Run the production.
    print "Running production"
    if pbc:
        cmdstr = "./dynamic %s %i %f %f 4 %f %f" % (xin, nstep*npr, tstep, float(nstep*tstep/1000), temperature, pressure)
    else:
        cmdstr = "./dynamic %s %i %f %f 2 %f" % (xin, nstep*npr, tstep, float(nstep*tstep/1000), temperature)
    odyn = _exec(cmdstr,print_command=verbose,print_to_screen=verbose)

    edyn = []
    kdyn = []
    for line in odyn:
        if 'Current Potential' in line:
            edyn.append(float(line.split()[2]))
        if 'Current Kinetic' in line:
            kdyn.append(float(line.split()[2]))

    edyn = np.array(edyn) * 4.184
    kdyn = np.array(kdyn) * 4.184

    print "Post-processing to get the dipole moments"
    cmdstr = "./analyze %s" % xain
    oanl = _exec(cmdstr,stdin="G,E",print_command=verbose,print_to_screen=verbose)

    # Read potential energy and dipole from file.
    eanl = []
    dip = []
    mass = 0.0
    for line in oanl:
        if 'Total System Mass' in line:
            mass = float(line.split()[-1])
        if 'Total Potential Energy : ' in line:
            eanl.append(float(line.split()[4]))
        if 'Dipole X,Y,Z-Components :' in line:
            dip.append([float(line.split()[i]) for i in range(-3,0)])

    # Energies in kilojoules per mole
    eanl = np.array(eanl) * 4.184
    # Dipole moments in debye
    dip = np.array(dip)
    # Volume of simulation boxes in cubic nanometers
    # Conversion factor derived from the following:
    # In [22]: 1.0 * gram / mole / (1.0 * nanometer)**3 / AVOGADRO_CONSTANT_NA / (kilogram/meter**3)
    # Out[22]: 1.6605387831627252
    conv = 1.6605387831627252
    if pbc:
        box = [[float(i) for i in line.split()[1:4]] for line in open(xyz[:-3]+"box").readlines()]
        vol = np.array([i[0]*i[1]*i[2] for i in box]) / 1000
        rho = conv * mass / vol
    else:
        vol = None
        rho = None

    return rho, edyn, kdyn, vol, dip
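
The factor conv = 1.6605387831627252 converts (g/mol) per nm^3 into kg/m^3; it is 10^24 divided by Avogadro's number. A short worked example with hypothetical numbers:

# Hypothetical numbers, for illustration of the density conversion only.
N_A = 6.02214076e23
conv = 1e24 / N_A            # ~1.66054  (g/mol)/nm^3 -> kg/m^3
mass = 216 * 18.0153         # g/mol, e.g. a box of 216 water molecules
vol = 6.51                   # nm^3, made-up average box volume
rho = conv * mass / vol      # ~993 kg/m^3, a water-like density
print(rho)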
Example #35
0
#!/usr/bin/env python
from forcebalance.molecule import *
from forcebalance.nifty import _exec

#Get output from geometry optimization
mol = Molecule("eth_opt.psi4out")
#Write the optimized geometry as an xyz file
mol.write("output.xyz", ftype="xyz")

#If you want to run some other Psi4 calculation
#then you can change the settings read by Molecule
#through Molecule.psi4args and write a new calculation input
mol.psi4args["calc"] = ["energy('mp2')"]
mol.psi4args["set"]["basis"] = ["aug-cc-pVTZ"]

#Write new calculation input from the final structure
#of the optimization
mol.write("eth_energy.psi4in", selection=-1)
_exec("psi4 -n 8 eth_energy.psi4in eth_energy.psi4out")

#Read in output and write qdata.txt file
mol_energy = Molecule("eth_energy.psi4out")
mol_energy.write("qdata.txt", ftype="qdata")
Example #36
0
    def __init__(self,options,tgt_opts,forcefield):
        """
        All options here are intended to be usable by every
        conceivable type of target (in other words, only
        add content here if it's widely applicable.)

        If we want to add attributes that are more specific
        (i.e. a set of reference forces for force matching), they
        are added in the subclass AbInitio that inherits from
        Target.

        """
        super(Target, self).__init__(options)
        #======================================#
        # Options that are given by the parser #
        #======================================#
        ## Root directory of the whole project
        self.set_option(options, 'root')
        ## Name of the target
        self.set_option(tgt_opts, 'name')
        if self.name in ["forcefield-remote"]:
            logger.error("forcefield-remote is not an allowed target name (reserved)")
            raise RuntimeError
        ## Type of target
        self.set_option(tgt_opts, 'type')
        ## Relative weight of the target
        self.set_option(tgt_opts, 'weight')
        ## Switch for finite difference gradients
        self.set_option(tgt_opts, 'fdgrad')
        ## Switch for finite difference Hessians
        self.set_option(tgt_opts, 'fdhess')
        ## Switch for FD gradients + Hessian diagonals
        self.set_option(tgt_opts, 'fdhessdiag')
        ## How many seconds to sleep (if any)
        self.set_option(tgt_opts, 'sleepy')
        ## Parameter types that trigger FD gradient elements
        self.set_option(None, None, 'fd1_pids', [i.upper() for i in tgt_opts['fd_ptypes']], default = [])
        ## Parameter types that trigger FD Hessian elements
        self.set_option(None, None, 'fd2_pids', [i.upper() for i in tgt_opts['fd_ptypes']], default = [])
        ## Finite difference step size
        self.set_option(options, 'finite_difference_h', 'h')
        ## Whether to make backup files
        self.set_option(options, 'backup')
        ## Directory to read data from.
        self.set_option(tgt_opts, 'read', 'rd')
        if self.rd is not None: self.rd = self.rd.strip("/")
        ## Iteration where we turn on zero-gradient skipping.
        self.set_option(options, 'zerograd')
        ## Gradient norm below which we skip.
        self.set_option(tgt_opts, 'epsgrad')
        ## List of parameter indices for which derivatives are computed.
        self.pgrad = list(range(forcefield.np))
        self.OptionDict['pgrad'] = self.pgrad

        #======================================#
        #     Variables which are set here     #
        #======================================#
        ## Relative directory of target
        if os.path.exists('targets'):
            tgtdir = 'targets'
        elif os.path.exists('simulations'):
            tgtdir = 'simulations'
        elif os.path.exists('targets.tar.bz2'):
            logger.info("Extracting targets folder from archive.\n")
            _exec("tar xvjf targets.tar.bz2")
            tgtdir = 'targets'
        elif os.path.exists('targets.tar.gz'):
            logger.info("Extracting targets folder from archive.\n")
            _exec("tar xvzf targets.tar.gz")
            tgtdir = 'targets'
        else:
            logger.error('\x1b[91mThe targets directory is missing!\x1b[0m\nDid you finish setting up the target data?\nPlace the data in a directory called "targets" or "simulations"\n')
            raise RuntimeError
        self.set_option(None, None, 'tgtdir', os.path.join(tgtdir,self.name))
        ## Temporary (working) directory; it is temp/(target_name)
        ## Used for storing temporary variables that don't change through the course of the optimization
        if 'input_file' in options and options['input_file'] is not None:
            self.tempbase    = os.path.splitext(options['input_file'])[0]+'.tmp'
        else:
            self.tempbase    = "temp"
        self.tempdir     = os.path.join(self.tempbase, self.name)
        ## self.tempdir     = os.path.join('temp',self.name)
        ## The directory in which the simulation is running - this can be updated.
        self.rundir      = self.tempdir
        ## Need the forcefield (here for now)
        self.FF          = forcefield
        ## mol2 files that are stored in the forcefield folder
        ## need to be included in the list of mol2 files for the target
        if hasattr(self, 'mol2'):
            for fnm in self.FF.fnms:
                if fnm.endswith('.mol2'):
                    self.mol2.append(fnm)

        ## Counts how often the objective function was computed
        self.xct         = 0
        ## Counts how often the gradient was computed
        self.gct         = 0
        ## Counts how often the Hessian was computed
        self.hct         = 0
        ## Whether to read indicate.log from file when restarting an aborted run.
        self.read_indicate    = True
        ## Whether to write indicate.log at every iteration (true for all but remote.)
        self.write_indicate   = True
        ## Whether to read objective.p from file when restarting an aborted run.
        self.read_objective       = True
        ## Whether to write objective.p at every iteration (true for all but remote.)
        self.write_objective      = True
        ## Create a new temp directory.
        if not options['continue']: 
            self.refresh_temp_directory()
        else:
            if not os.path.exists(os.path.join(self.root,self.tempdir)):
                os.makedirs(os.path.join(self.root,self.tempdir))
        ## This flag specifies whether the target has been evaluated yet.
        self.evaluated = False
        ## This flag specifies whether the previous optimization step was good.
        self.goodstep = False
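
The constructor shells out to tar when it finds targets.tar.bz2 or targets.tar.gz. For reference, the same step can be written with the standard library alone; the sketch below is an illustration, not the ForceBalance implementation.

import tarfile

def extract_targets(archive):
    # Equivalent of `tar xvjf targets.tar.bz2` (or xvzf for .gz);
    # tarfile auto-detects the compression from the file itself.
    with tarfile.open(archive) as tar:
        tar.extractall()

# extract_targets("targets.tar.bz2")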
Example #37
0
def main():

    """
    Run the script with -h for help
    Usage: python npt_tinker.py input.xyz [-k input.key] liquid_production_steps liquid_timestep liquid_interval temperature(K) pressure(atm)
    """

    if not os.path.exists(args.liquid_xyzfile):
        warn_press_key("Warning: %s does not exist, script cannot continue" % args.liquid_xyzfile)

    # Set up some conversion factors
    # All units are in kJ/mol
    N = niterations
    # Conversion factor for kT derived from:
    # In [6]: 1.0 / ((1.0 * kelvin * BOLTZMANN_CONSTANT_kB * AVOGADRO_CONSTANT_NA) / kilojoule_per_mole)
    # Out[6]: 120.27221251395186
    T     = temperature
    mBeta = -120.27221251395186 / temperature
    Beta  =  120.27221251395186 / temperature
    kT    =  0.0083144724712202 * temperature
    # Conversion factor for pV derived from:
    # In [14]: 1.0 * atmosphere * nanometer ** 3 * AVOGADRO_CONSTANT_NA / kilojoule_per_mole
    # Out[14]: 0.061019351687175
    pcon  =  0.061019351687175

    # Load the force field in from the ForceBalance pickle.
    FF,mvals,h,AGrad = lp_load(open('forcebalance.p'))
    
    # Create the force field XML files.
    FF.make(mvals)

    #=================================================================#
    #     Get the number of molecules from the liquid xyz file.       #
    #=================================================================#

    xin = "%s" % args.liquid_xyzfile + ("" if args.liquid_keyfile == None else " -k %s" % args.liquid_keyfile)
    cmdstr = "./analyze %s" % xin
    oanl = _exec(cmdstr,stdin="G",print_command=True,print_to_screen=True)
    molflag = False
    for line in oanl:
        if 'Number of Molecules' in line:
            if not molflag:
                NMol = int(line.split()[-1])
                molflag = True
            else:
                raise Exception("TINKER output contained more than one line with the words 'Number of Molecules'")
    if molflag:
        print "Detected %i Molecules" % NMol
    if not molflag:
        raise Exception("Failed to detect the number of molecules")

    #=================================================================#
    # Run the simulation for the full system and analyze the results. #
    #=================================================================#
    Rhos, Potentials, Kinetics, Volumes, Dips = run_simulation(args.liquid_xyzfile,args.liquid_keyfile,tstep=timestep,nstep=nsteps,neq=nequiliterations,npr=niterations,verbose=True)
    Energies = Potentials + Kinetics
    V  = Volumes
    pV = pressure * Volumes
    H = Energies + pV

    # Get the energy and dipole gradients.
    print "Post-processing the liquid simulation snapshots."
    G, GDx, GDy, GDz = energy_dipole_derivatives(mvals,h,FF,args.liquid_xyzfile,args.liquid_keyfile,AGrad)
    print

    #==============================================#
    # Now run the simulation for just the monomer. #
    #==============================================#
    _a, mPotentials, mKinetics, _b, _c = run_simulation(args.gas_xyzfile,args.gas_keyfile,tstep=m_timestep,nstep=m_nsteps,neq=m_nequiliterations,npr=m_niterations,pbc=False)
    mEnergies = mPotentials + mKinetics
    mN = len(mEnergies)
    print "Post-processing the gas simulation snapshots."
    mG = energy_derivatives(mvals,h,FF,args.gas_xyzfile,args.gas_keyfile,AGrad)
    print

    numboots = 1000    
    def bootstats(func,inputs):
        # Calculate error using bootstats method
        dboot = []
        for i in range(numboots):
            newins = {k : v[np.random.randint(len(v),size=len(v))] for k,v in inputs.items()}
            dboot.append(np.mean(func(**newins)))
        return func(**inputs),np.std(np.array(dboot))
        
    def calc_arr(b = None, **kwargs):
        # This tomfoolery is required because of Python syntax;
        # default arguments must come after nondefault arguments
        # and kwargs must come at the end.  This function is used
        # in bootstrap error calcs and also in derivative calcs.
        if 'arr' in kwargs:
            arr = kwargs['arr']
        if b == None: b = np.ones(len(arr),dtype=float)
        return bzavg(arr,b)

    # The density in kg/m^3.
    # Note: Not really necessary to use bootstrap here, but good to 
    # demonstrate the principle.
    Rho_avg,  Rho_err  = bootstats(calc_arr,{'arr':Rhos})
    Rho_err *= np.sqrt(statisticalInefficiency(Rhos))

    print "The finite difference step size is:",h

    # The first density derivative
    GRho = mBeta * (flat(np.mat(G) * col(Rhos)) / N - np.mean(Rhos) * np.mean(G, axis=1))

    FDCheck = False

    Sep = printcool("Density: % .4f +- % .4f kg/m^3, Analytic Derivative" % (Rho_avg, Rho_err))
    FF.print_map(vals=GRho)
    print Sep

    if FDCheck:
        Sep = printcool("Numerical Derivative:")
        GRho1 = property_derivatives(mvals, h, FF, args.liquid_xyzfile, args.liquid_keyfile, kT, calc_arr, {'arr':Rhos})
        FF.print_map(vals=GRho1)
        Sep = printcool("Difference (Absolute, Fractional):")
        absfrac = ["% .4e  % .4e" % (i-j, (i-j)/j) for i,j in zip(GRho, GRho1)]
        FF.print_map(vals=absfrac)

    # The enthalpy of vaporization in kJ/mol.
    Ene_avg,  Ene_err  = bootstats(calc_arr,{'arr':Energies})
    mEne_avg, mEne_err = bootstats(calc_arr,{'arr':mEnergies})
    pV_avg,   pV_err   = bootstats(calc_arr,{'arr':pV})
    Ene_err  *= np.sqrt(statisticalInefficiency(Energies))
    mEne_err *= np.sqrt(statisticalInefficiency(mEnergies))
    pV_err   *= np.sqrt(statisticalInefficiency(pV))

    Hvap_avg = mEne_avg - Ene_avg / NMol + kT - np.mean(pV) / NMol
    Hvap_err = np.sqrt(Ene_err**2 / NMol**2 + mEne_err**2 + pV_err**2/NMol**2)

    # Build the first Hvap derivative.
    GHvap = np.mean(G,axis=1)
    GHvap += mBeta * (flat(np.mat(G) * col(Energies)) / N - Ene_avg * np.mean(G, axis=1))
    GHvap /= NMol
    GHvap -= np.mean(mG,axis=1)
    GHvap -= mBeta * (flat(np.mat(mG) * col(mEnergies)) / N - mEne_avg * np.mean(mG, axis=1))
    GHvap *= -1
    GHvap -= mBeta * (flat(np.mat(G) * col(pV)) / N - np.mean(pV) * np.mean(G, axis=1)) / NMol

    print "Box total energy:", np.mean(Energies)
    print "Monomer total energy:", np.mean(mEnergies)

    Sep = printcool("Enthalpy of Vaporization: % .4f +- %.4f kJ/mol, Derivatives below" % (Hvap_avg, Hvap_err))
    FF.print_map(vals=GHvap)
    print Sep

    # Define some things to make the analytic derivatives easier.
    Gbar = np.mean(G,axis=1)
    def covde(vec):
        return flat(np.mat(G)*col(vec))/N - Gbar*np.mean(vec)
    def avg(vec):
        return np.mean(vec)

    ## Thermal expansion coefficient and bootstrap error estimation
    def calc_alpha(b = None, **kwargs):
        if 'h_' in kwargs:
            h_ = kwargs['h_']
        if 'v_' in kwargs:
            v_ = kwargs['v_']
        if b == None: b = np.ones(len(v_),dtype=float)
        return 1/(kT*T) * (bzavg(h_*v_,b)-bzavg(h_,b)*bzavg(v_,b))/bzavg(v_,b)

    Alpha, Alpha_err = bootstats(calc_alpha,{'h_':H, 'v_':V})
    Alpha_err *= np.sqrt(max(statisticalInefficiency(V),statisticalInefficiency(H)))

    ## Thermal expansion coefficient analytic derivative
    GAlpha1 = mBeta * covde(H*V) / avg(V)
    GAlpha2 = Beta * avg(H*V) * covde(V) / avg(V)**2
    GAlpha3 = flat(np.mat(G)*col(V))/N/avg(V) - Gbar
    GAlpha4 = Beta * covde(H)
    GAlpha  = (GAlpha1 + GAlpha2 + GAlpha3 + GAlpha4)/(kT*T)
    Sep = printcool("Thermal expansion coefficient: % .4e +- %.4e K^-1\nAnalytic Derivative:" % (Alpha, Alpha_err))
    FF.print_map(vals=GAlpha)
    if FDCheck:
        GAlpha_fd = property_derivatives(mvals, h, FF, args.liquid_xyzfile, args.liquid_keyfile, kT, calc_alpha, {'h_':H,'v_':V})
        Sep = printcool("Numerical Derivative:")
        FF.print_map(vals=GAlpha_fd)
        Sep = printcool("Difference (Absolute, Fractional):")
        absfrac = ["% .4e  % .4e" % (i-j, (i-j)/j) for i,j in zip(GAlpha, GAlpha_fd)]
        FF.print_map(vals=absfrac)

    ## Isothermal compressibility
    # In [15]: 1.0*bar*nanometer**3/kilojoules_per_mole/item
    # Out[15]: 0.06022141792999999

    bar_unit = 0.06022141793
    def calc_kappa(b=None, **kwargs):
        if 'v_' in kwargs:
            v_ = kwargs['v_']
        if b == None: b = np.ones(len(v_),dtype=float)
        return bar_unit / kT * (bzavg(v_**2,b)-bzavg(v_,b)**2)/bzavg(v_,b)

    Kappa, Kappa_err = bootstats(calc_kappa,{'v_':V})
    Kappa_err *= np.sqrt(statisticalInefficiency(V))

    ## Isothermal compressibility analytic derivative
    Sep = printcool("Isothermal compressibility:    % .4e +- %.4e bar^-1\nAnalytic Derivative:" % (Kappa, Kappa_err))
    GKappa1 = -1 * Beta**2 * avg(V) * covde(V**2) / avg(V)**2
    GKappa2 = +1 * Beta**2 * avg(V**2) * covde(V) / avg(V)**2
    GKappa3 = +1 * Beta**2 * covde(V)
    GKappa  = bar_unit*(GKappa1 + GKappa2 + GKappa3)
    FF.print_map(vals=GKappa)
    if FDCheck:
        GKappa_fd = property_derivatives(mvals, h, FF, args.liquid_xyzfile, args.liquid_keyfile, kT, calc_kappa, {'v_':V})
        Sep = printcool("Numerical Derivative:")
        FF.print_map(vals=GKappa_fd)
        Sep = printcool("Difference (Absolute, Fractional):")
        absfrac = ["% .4e  % .4e" % (i-j, (i-j)/j) for i,j in zip(GKappa, GKappa_fd)]
        FF.print_map(vals=absfrac)

    ## Isobaric heat capacity
    def calc_cp(b=None, **kwargs):
        if 'h_' in kwargs:
            h_ = kwargs['h_']
        if b == None: b = np.ones(len(h_),dtype=float)
        Cp_  = 1/(NMol*kT*T) * (bzavg(h_**2,b) - bzavg(h_,b)**2)
        Cp_ *= 1000 / 4.184
        return Cp_

    Cp, Cp_err = bootstats(calc_cp, {'h_':H})
    Cp_err *= np.sqrt(statisticalInefficiency(H))

    ## Isobaric heat capacity analytic derivative
    GCp1 = 2*covde(H) * 1000 / 4.184 / (NMol*kT*T)
    GCp2 = mBeta*covde(H**2) * 1000 / 4.184 / (NMol*kT*T)
    GCp3 = 2*Beta*avg(H)*covde(H) * 1000 / 4.184 / (NMol*kT*T)
    GCp  = GCp1 + GCp2 + GCp3
    Sep = printcool("Isobaric heat capacity:        % .4e +- %.4e cal mol-1 K-1\nAnalytic Derivative:" % (Cp, Cp_err))
    FF.print_map(vals=GCp)
    if FDCheck:
        GCp_fd = property_derivatives(mvals, h, FF, args.liquid_xyzfile, args.liquid_keyfile, kT, calc_cp, {'h_':H})
        Sep = printcool("Numerical Derivative:")
        FF.print_map(vals=GCp_fd)
        Sep = printcool("Difference (Absolute, Fractional):")
        absfrac = ["% .4e  % .4e" % (i-j, (i-j)/j) for i,j in zip(GCp,GCp_fd)]
        FF.print_map(vals=absfrac)

    ## Dielectric constant
    # eps0 = 8.854187817620e-12 * coulomb**2 / newton / meter**2
    # epsunit = 1.0*(debye**2) / nanometer**3 / BOLTZMANN_CONSTANT_kB / kelvin
    # prefactor = epsunit/eps0/3
    prefactor = 30.348705333964077
    def calc_eps0(b=None, **kwargs):
        if 'd_' in kwargs: # Dipole moment vector.
            d_ = kwargs['d_']
        if 'v_' in kwargs: # Volume.
            v_ = kwargs['v_']
        if b == None: b = np.ones(len(v_),dtype=float)
        dx = d_[:,0]
        dy = d_[:,1]
        dz = d_[:,2]
        D2  = bzavg(dx**2,b)-bzavg(dx,b)**2
        D2 += bzavg(dy**2,b)-bzavg(dy,b)**2
        D2 += bzavg(dz**2,b)-bzavg(dz,b)**2
        return prefactor*D2/bzavg(v_,b)/T

    Eps0, Eps0_err = bootstats(calc_eps0,{'d_':Dips, 'v_':V})
    Eps0 += 1.0
    Eps0_err *= np.sqrt(np.mean([statisticalInefficiency(Dips[:,0]),statisticalInefficiency(Dips[:,1]),statisticalInefficiency(Dips[:,2])]))

    ## Dielectric constant analytic derivative
    Dx = Dips[:,0]
    Dy = Dips[:,1]
    Dz = Dips[:,2]
    D2 = avg(Dx**2)+avg(Dy**2)+avg(Dz**2)-avg(Dx)**2-avg(Dy)**2-avg(Dz)**2
    GD2  = 2*(flat(np.mat(GDx)*col(Dx))/N - avg(Dx)*(np.mean(GDx,axis=1))) - Beta*(covde(Dx**2) - 2*avg(Dx)*covde(Dx))
    GD2 += 2*(flat(np.mat(GDy)*col(Dy))/N - avg(Dy)*(np.mean(GDy,axis=1))) - Beta*(covde(Dy**2) - 2*avg(Dy)*covde(Dy))
    GD2 += 2*(flat(np.mat(GDz)*col(Dz))/N - avg(Dz)*(np.mean(GDz,axis=1))) - Beta*(covde(Dz**2) - 2*avg(Dz)*covde(Dz))
    GEps0 = prefactor*(GD2/avg(V) - mBeta*covde(V)*D2/avg(V)**2)/T
    Sep = printcool("Dielectric constant:           % .4e +- %.4e\nAnalytic Derivative:" % (Eps0, Eps0_err))
    FF.print_map(vals=GEps0)
    if FDCheck:
        GEps0_fd = property_derivatives(mvals, h, FF, args.liquid_xyzfile, args.liquid_keyfile, kT, calc_eps0, {'d_':Dips,'v_':V})
        Sep = printcool("Numerical Derivative:")
        FF.print_map(vals=GEps0_fd)
        Sep = printcool("Difference (Absolute, Fractional):")
        absfrac = ["% .4e  % .4e" % (i-j, (i-j)/j) for i,j in zip(GEps0,GEps0_fd)]
        FF.print_map(vals=absfrac)

    ## Print the final force field.
    pvals = FF.make(mvals)

    with open(os.path.join('npt_result.p'),'w') as f: lp_dump((Rhos, Volumes, Potentials, Energies, Dips, G, [GDx, GDy, GDz], mPotentials, mEnergies, mG, Rho_err, Hvap_err, Alpha_err, Kappa_err, Cp_err, Eps0_err, NMol),f)
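
bootstats() above estimates error bars by bootstrap resampling: redraw the time series with replacement, recompute the statistic, and take the spread of the resampled estimates. A self-contained sketch of the same idea, using synthetic data and a modern numpy random generator:

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(loc=997.0, scale=5.0, size=500)   # made-up density series, kg/m^3
numboots = 1000
boots = [np.mean(rng.choice(data, size=len(data), replace=True))
         for _ in range(numboots)]
mean, err = np.mean(data), np.std(boots)
print("%.2f +- %.2f" % (mean, err))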
Example #38
0
    def read(self, mvals, AGrad=True, AHess=True):
        
        """
        Read in time series for all previous iterations.
        """

        unpack = forcebalance.nifty.lp_load(open('forcebalance.p'))
        mvals1 = unpack[1]
        if (np.max(np.abs(mvals1 - mvals)) > 1e-3):
            warn_press_key("mvals from forcebalance.p does not match up with internal values! (Are you reading data from a previous run?)\nmvals(call)=%s mvals(disk)=%s" % (mvals, mvals1))

        for dn in range(Counter()-1, -1, -1):
            cwd = os.getcwd()
            os.chdir(self.absrd(inum=dn))
            mprev = np.loadtxt('mvals.txt')
            Results = {}
            Points = []  # These are the phase points for which data exists.
            mPoints = [] # These are the phase points to use for enthalpy of vaporization; if we're scanning pressure then set hvap_wt for higher pressures to zero.
            tt = 0
            logger.info('Reading liquid data from %s\n' % os.getcwd())
            for label, PT in zip(self.Labels, self.PhasePoints):
                if os.path.exists('./%s/npt_result.p.bz2' % label):
                    _exec('bunzip2 ./%s/npt_result.p.bz2' % label, print_command=False)
                if os.path.exists('./%s/npt_result.p' % label):
                    Points.append(PT)
                    Results[tt] = lp_load(open('./%s/npt_result.p' % label))
                    if 'hvap' in self.RefData and PT[0] not in [i[0] for i in mPoints]:
                        mPoints.append(PT)
                    tt += 1
                else:
                    logger.warning('In %s :\n' % os.getcwd())
                    logger.warning('The file ./%s/npt_result.p does not exist so we cannot read it\n' % label)
                    pass
            if len(Points) == 0:
                raise Exception('The liquid simulations have terminated with \x1b[1;91mno readable data\x1b[0m - this is a problem!')
    
            # Assign variable names to all the stuff in npt_result.p
            Rhos, Vols, Potentials, Energies, Dips, Grads, GDips, mPotentials, mEnergies, mGrads, \
                Rho_errs, Hvap_errs, Alpha_errs, Kappa_errs, Cp_errs, Eps0_errs, NMols = ([Results[t][i] for t in range(len(Points))] for i in range(17))
            # Determine the number of molecules
            if len(set(NMols)) != 1:
                logger.error(str(NMols))
                raise Exception('The above list should only contain one number - the number of molecules')
            else:
                NMol = list(set(NMols))[0]
        
            if not self.adapt_errors:
                self.AllResults = defaultdict(lambda:defaultdict(list))

            astrm = astr(mprev)
            if len(Points) != len(self.Labels):
                logger.info("Data sets is not full, will not use for concatenation.\n")
                astrm += "_"*(dn+1)
        
            self.AllResults[astrm]['Pts'].append(Points)
            self.AllResults[astrm]['mPts'].append(mPoints)
            self.AllResults[astrm]['E'].append(np.array(Energies))
            self.AllResults[astrm]['V'].append(np.array(Vols))
            self.AllResults[astrm]['R'].append(np.array(Rhos))
            self.AllResults[astrm]['Dx'].append(np.array([d[:,0] for d in Dips]))
            self.AllResults[astrm]['Dy'].append(np.array([d[:,1] for d in Dips]))
            self.AllResults[astrm]['Dz'].append(np.array([d[:,2] for d in Dips]))
            self.AllResults[astrm]['G'].append(np.array(Grads))
            self.AllResults[astrm]['GDx'].append(np.array([gd[0] for gd in GDips]))
            self.AllResults[astrm]['GDy'].append(np.array([gd[1] for gd in GDips]))
            self.AllResults[astrm]['GDz'].append(np.array([gd[2] for gd in GDips]))
            self.AllResults[astrm]['L'].append(len(Energies[0]))
            self.AllResults[astrm]['Steps'].append(self.liquid_md_steps)
    
            if len(mPoints) > 0:
                self.AllResults[astrm]['mE'].append(np.array([i for pt, i in zip(Points,mEnergies) if pt in mPoints]))
                self.AllResults[astrm]['mG'].append(np.array([i for pt, i in zip(Points,mGrads) if pt in mPoints]))

            os.chdir(cwd)

        return self.get(mvals, AGrad, AHess)
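
read() shells out to bunzip2 before loading each npt_result.p. A standard-library alternative is sketched below; it is an illustration only (the directory name in the comment is hypothetical), not the ForceBalance implementation.

import bz2, os

def ensure_unzipped(path):
    # Decompress path + '.bz2' in place if the plain file is not already there.
    bzpath = path + '.bz2'
    if not os.path.exists(path) and os.path.exists(bzpath):
        with bz2.BZ2File(bzpath, 'rb') as fin, open(path, 'wb') as fout:
            fout.write(fin.read())

# ensure_unzipped('./298.15K-1.0atm/npt_result.p')  # hypothetical phase-point label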
Example #39
0
    def get(self, mvals, AGrad=True, AHess=True):
        
        """
        Fitting of liquid bulk properties.  This is the current major
        direction of development for ForceBalance.  Basically, fitting
        the QM energies / forces alone does not always give us the
        best simulation behavior.  In many cases it makes more sense
        to try and reproduce some experimentally known data as well.

        In order to reproduce experimentally known data, we need to
        run a simulation and compare the simulation result to
        experiment.  The main challenge here is that the simulations
        are computationally intensive (i.e. they require energy and
        force evaluations), and furthermore the results are noisy.  We
        need to run the simulations automatically and remotely
        (i.e. on clusters) and a good way to calculate the derivatives
        of the simulation results with respect to the parameter values.

        This function contains some experimentally known values of the
        density and enthalpy of vaporization (Hvap) of liquid water.
        It launches the density and Hvap calculations on the cluster,
        and gathers the results / derivatives.  The actual calculation
        of results / derivatives is done in a separate file.

        After the results come back, they are gathered together to form
        an objective function.

        @param[in] mvals Mathematical parameter values
        @param[in] AGrad Switch to turn on analytic gradient
        @param[in] AHess Switch to turn on analytic Hessian
        @return Answer Contribution to the objective function
        
        """
        
        unpack = forcebalance.nifty.lp_load(open('forcebalance.p'))
        mvals1 = unpack[1]
        if (np.max(np.abs(mvals1 - mvals)) > 1e-3):
            warn_press_key("mvals from forcebalance.p does not match up with internal values! (Are you reading data from a previous run?)\nmvals(call)=%s mvals(disk)=%s" % (mvals, mvals1))

        mbar_verbose = False

        Answer = {}

        Results = {}
        Points = []  # These are the phase points for which data exists.
        BPoints = [] # These are the phase points for which we are doing MBAR for the condensed phase.
        mBPoints = [] # These are the phase points for which we are doing MBAR for the monomers.
        mPoints = [] # These are the phase points to use for enthalpy of vaporization; if we're scanning pressure then set hvap_wt for higher pressures to zero.
        tt = 0
        for label, PT in zip(self.Labels, self.PhasePoints):
            if os.path.exists('./%s/npt_result.p.bz2' % label):
                _exec('bunzip2 ./%s/npt_result.p.bz2' % label, print_command=False)
            if os.path.exists('./%s/npt_result.p' % label):
                logger.info('Reading information from ./%s/npt_result.p\n' % label)
                Points.append(PT)
                Results[tt] = lp_load(open('./%s/npt_result.p' % label))
                if 'hvap' in self.RefData and PT[0] not in [i[0] for i in mPoints]:
                    mPoints.append(PT)
                if 'mbar' in self.RefData and PT in self.RefData['mbar'] and self.RefData['mbar'][PT]:
                    BPoints.append(PT)
                    if 'hvap' in self.RefData and PT[0] not in [i[0] for i in mBPoints]:
                        mBPoints.append(PT)
                tt += 1
            else:
                logger.warning('In %s :\n' % os.getcwd())
                logger.warning('The file ./%s/npt_result.p does not exist so we cannot read it\n' % label)
                pass
        if len(Points) == 0:
            raise Exception('The liquid simulations have terminated with \x1b[1;91mno readable data\x1b[0m - this is a problem!')

        # Assign variable names to all the stuff in npt_result.p
        Rhos, Vols, Potentials, Energies, Dips, Grads, GDips, mPotentials, mEnergies, mGrads, \
            Rho_errs, Hvap_errs, Alpha_errs, Kappa_errs, Cp_errs, Eps0_errs, NMols = ([Results[t][i] for t in range(len(Points))] for i in range(17))
        # Determine the number of molecules
        if len(set(NMols)) != 1:
            logger.error(str(NMols))
            raise Exception('The above list should only contain one number - the number of molecules')
        else:
            NMol = list(set(NMols))[0]
    
        if not self.adapt_errors:
            self.AllResults = defaultdict(lambda:defaultdict(list))

        astrm = astr(mvals)
        if len(Points) != len(self.Labels):
            logger.info("Data sets is not full, will not use for concatenation.")
            astrm += "_"*(Counter()+1)
        self.AllResults[astrm]['Pts'].append(Points)
        self.AllResults[astrm]['mPts'].append(Points)
        self.AllResults[astrm]['E'].append(np.array(Energies))
        self.AllResults[astrm]['V'].append(np.array(Vols))
        self.AllResults[astrm]['R'].append(np.array(Rhos))
        self.AllResults[astrm]['Dx'].append(np.array([d[:,0] for d in Dips]))
        self.AllResults[astrm]['Dy'].append(np.array([d[:,1] for d in Dips]))
        self.AllResults[astrm]['Dz'].append(np.array([d[:,2] for d in Dips]))
        self.AllResults[astrm]['G'].append(np.array(Grads))
        self.AllResults[astrm]['GDx'].append(np.array([gd[0] for gd in GDips]))
        self.AllResults[astrm]['GDy'].append(np.array([gd[1] for gd in GDips]))
        self.AllResults[astrm]['GDz'].append(np.array([gd[2] for gd in GDips]))
        self.AllResults[astrm]['L'].append(len(Energies[0]))
        self.AllResults[astrm]['Steps'].append(self.liquid_md_steps)

        if len(mPoints) > 0:
            self.AllResults[astrm]['mE'].append(np.array([i for pt, i in zip(Points,mEnergies) if pt in mPoints]))
            self.AllResults[astrm]['mG'].append(np.array([i for pt, i in zip(Points,mGrads) if pt in mPoints]))

        # Number of data sets belonging to this value of the parameters.
        Nrpt = len(self.AllResults[astrm]['R'])
        sumsteps = sum(self.AllResults[astrm]['Steps'])
        if self.liquid_md_steps != sumsteps:
            printcool("This objective function evaluation combines %i datasets\n" \
                          "Increasing simulation length: %i -> %i steps" % \
                          (Nrpt, self.liquid_md_steps, sumsteps), color=6)
            if self.liquid_md_steps * 2 != sumsteps:
                raise RuntimeError("Spoo!")
            self.liquid_eq_steps *= 2
            self.liquid_md_steps *= 2
            self.gas_eq_steps *= 2
            self.gas_md_steps *= 2

        # Concatenate along the data-set axis (more than 1 element  if we've returned to these parameters.)
        E, V, R, Dx, Dy, Dz = \
            (np.hstack(tuple(self.AllResults[astrm][i])) for i in \
                 ['E', 'V', 'R', 'Dx', 'Dy', 'Dz'])

        G, GDx, GDy, GDz = \
            (np.hstack((np.concatenate(tuple(self.AllResults[astrm][i]), axis=2))) for i in ['G', 'GDx', 'GDy', 'GDz'])

        if len(mPoints) > 0:
            mE = np.hstack(tuple(self.AllResults[astrm]['mE']))
            mG = np.hstack((np.concatenate(tuple(self.AllResults[astrm]['mG']), axis=2)))
        Rho_calc = OrderedDict([])
        Rho_grad = OrderedDict([])
        Rho_std  = OrderedDict([])
        Hvap_calc = OrderedDict([])
        Hvap_grad = OrderedDict([])
        Hvap_std  = OrderedDict([])
        Alpha_calc = OrderedDict([])
        Alpha_grad = OrderedDict([])
        Alpha_std  = OrderedDict([])
        Kappa_calc = OrderedDict([])
        Kappa_grad = OrderedDict([])
        Kappa_std  = OrderedDict([])
        Cp_calc = OrderedDict([])
        Cp_grad = OrderedDict([])
        Cp_std  = OrderedDict([])
        Eps0_calc = OrderedDict([])
        Eps0_grad = OrderedDict([])
        Eps0_std  = OrderedDict([])

        # The unit that converts atmospheres * nm**3 into kJ/mol :)
        pvkj=0.061019351687175

        # Run MBAR using the total energies. Required for estimates that use the kinetic energy.
        BSims = len(BPoints)
        Shots = len(E[0])
        N_k = np.ones(BSims)*Shots
        # Use the value of the energy for snapshot t from simulation k at potential m
        U_kln = np.zeros([BSims,BSims,Shots])
        for m, PT in enumerate(BPoints):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            beta = 1. / (kb * T)
            for k in range(BSims):
                # The correct Boltzmann factors include PV.
                # Note that because the Boltzmann factors are computed from the conditions at simulation "m",
                # the pV terms must be rescaled to the pressure at simulation "m".
                kk = Points.index(BPoints[k])
                U_kln[k, m, :]   = E[kk] + P*V[kk]*pvkj
                U_kln[k, m, :]  *= beta
        W1 = None
        if len(BPoints) > 1:
            logger.info("Running MBAR analysis on %i states...\n" % len(BPoints))
            mbar = pymbar.MBAR(U_kln, N_k, verbose=mbar_verbose, relative_tolerance=5.0e-8)
            W1 = mbar.getWeights()
            logger.info("Done\n")
        elif len(BPoints) == 1:
            W1 = np.ones((len(BPoints)*Shots, len(BPoints)))
            W1 /= len(BPoints)*Shots
        
        def fill_weights(weights, phase_points, mbar_points, snapshots):
            """ Fill in the weight matrix with MBAR weights where MBAR was run, 
            and equal weights otherwise. """
            new_weights = np.zeros([len(phase_points)*snapshots,len(phase_points)])
            for m, PT in enumerate(phase_points):
                if PT in mbar_points:
                    mm = mbar_points.index(PT)
                    for kk, PT1 in enumerate(mbar_points):
                        k = phase_points.index(PT1)
                        logger.debug("Will fill W2[%i:%i,%i] with W1[%i:%i,%i]\n" % (k*snapshots,k*snapshots+snapshots,m,kk*snapshots,kk*snapshots+snapshots,mm))
                        new_weights[k*snapshots:(k+1)*snapshots,m] = weights[kk*snapshots:(kk+1)*snapshots,mm]
                else:
                    logger.debug("Will fill W2[%i:%i,%i] with equal weights\n" % (m*snapshots,(m+1)*snapshots,m))
                    new_weights[m*snapshots:(m+1)*snapshots,m] = 1.0/snapshots
            return new_weights
        
        W2 = fill_weights(W1, Points, BPoints, Shots)

        if len(mPoints) > 0:
            # Run MBAR on the monomers.  This is barely necessary.
            mW1 = None
            mShots = len(mE[0])
            if len(mBPoints) > 1:
                mBSims = len(mBPoints)
                mN_k = np.ones(mBSims)*mShots
                mU_kln = np.zeros([mBSims,mBSims,mShots])
                for m, PT in enumerate(mBPoints):
                    T = PT[0]
                    beta = 1. / (kb * T)
                    for k in range(mBSims):
                        kk = Points.index(mBPoints[k])
                        mU_kln[k, m, :]  = mE[kk]
                        mU_kln[k, m, :] *= beta
                if np.abs(np.std(mE)) > 1e-6 and mBSims > 1:
                    mmbar = pymbar.MBAR(mU_kln, mN_k, verbose=False, relative_tolerance=5.0e-8, method='self-consistent-iteration')
                    mW1 = mmbar.getWeights()
            elif len(mBPoints) == 1:
                mW1 = np.ones((mShots, 1))
                mW1 /= mShots
            mW2 = fill_weights(mW1, mPoints, mBPoints, mShots)
         
        if self.do_self_pol:
            EPol = self.polarization_correction(mvals)
            GEPol = np.array([(f12d3p(fdwrap(self.polarization_correction, mvals, p), h = self.h, f0 = EPol)[0] if p in self.pgrad else 0.0) for p in range(self.FF.np)])
            bar = printcool("Self-polarization correction to \nenthalpy of vaporization is % .3f kJ/mol%s" % (EPol, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=GEPol)
                logger.info(bar)

        # Arrays must be flattened now for calculation of properties.
        E = E.flatten()
        V = V.flatten()
        R = R.flatten()
        Dx = Dx.flatten()
        Dy = Dy.flatten()
        Dz = Dz.flatten()
        if len(mPoints) > 0: mE = mE.flatten()
            
        for i, PT in enumerate(Points):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            PV = P*V*pvkj
            H = E + PV
            # The weights that we want are the last ones.
            W = flat(W2[:,i])
            C = weight_info(W, PT, np.ones(len(Points))*Shots, verbose=mbar_verbose)
            Gbar = flat(np.matrix(G)*col(W))
            mBeta = -1/kb/T
            Beta  = 1/kb/T
            kT    = kb*T
            # Define some things to make the analytic derivatives easier.
            def avg(vec):
                return np.dot(W,vec)
            def covde(vec):
                return flat(np.matrix(G)*col(W*vec)) - avg(vec)*Gbar
            def deprod(vec):
                return flat(np.matrix(G)*col(W*vec))
            ## Density.
            Rho_calc[PT]   = np.dot(W,R)
            Rho_grad[PT]   = mBeta*(flat(np.matrix(G)*col(W*R)) - np.dot(W,R)*Gbar)
            ## Enthalpy of vaporization.
            if PT in mPoints:
                ii = mPoints.index(PT)
                mW = flat(mW2[:,ii])
                mGbar = flat(np.matrix(mG)*col(mW))
                Hvap_calc[PT]  = np.dot(mW,mE) - np.dot(W,E)/NMol + kb*T - np.dot(W, PV)/NMol
                Hvap_grad[PT]  = mGbar + mBeta*(flat(np.matrix(mG)*col(mW*mE)) - np.dot(mW,mE)*mGbar)
                Hvap_grad[PT] -= (Gbar + mBeta*(flat(np.matrix(G)*col(W*E)) - np.dot(W,E)*Gbar)) / NMol
                Hvap_grad[PT] -= (mBeta*(flat(np.matrix(G)*col(W*PV)) - np.dot(W,PV)*Gbar)) / NMol
                if self.do_self_pol:
                    Hvap_calc[PT] -= EPol
                    Hvap_grad[PT] -= GEPol
                if hasattr(self,'use_cni') and self.use_cni:
                    if not ('cni' in self.RefData and self.RefData['cni'][PT]):
                        raise RuntimeError('Asked for a nonideality correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.')
                    logger.debug("Adding % .3f to enthalpy of vaporization at " % self.RefData['cni'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cni'][PT]
                if hasattr(self,'use_cvib_intra') and self.use_cvib_intra:
                    if not ('cvib_intra' in self.RefData and self.RefData['cvib_intra'][PT]):
                        raise RuntimeError('Asked for a quantum intramolecular vibrational correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.')
                    logger.debug("Adding % .3f to enthalpy of vaporization at " % self.RefData['cvib_intra'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cvib_intra'][PT]
                if hasattr(self,'use_cvib_inter') and self.use_cvib_inter:
                    if not ('cvib_inter' in self.RefData and self.RefData['cvib_inter'][PT]):
                        raise RuntimeError('Asked for a quantum intermolecular vibrational correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.')
                    logger.debug("Adding % .3f to enthalpy of vaporization at " % self.RefData['cvib_inter'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cvib_inter'][PT]
            else:
                Hvap_calc[PT]  = 0.0
                Hvap_grad[PT]  = np.zeros(self.FF.np)
            ## Thermal expansion coefficient.
            Alpha_calc[PT] = 1e4 * (avg(H*V)-avg(H)*avg(V))/avg(V)/(kT*T)
            GAlpha1 = -1 * Beta * deprod(H*V) * avg(V) / avg(V)**2
            GAlpha2 = +1 * Beta * avg(H*V) * deprod(V) / avg(V)**2
            GAlpha3 = deprod(V)/avg(V) - Gbar
            GAlpha4 = Beta * covde(H)
            Alpha_grad[PT] = 1e4 * (GAlpha1 + GAlpha2 + GAlpha3 + GAlpha4)/(kT*T)
            ## Isothermal compressibility.
            bar_unit = 0.06022141793 * 1e6
            Kappa_calc[PT] = bar_unit / kT * (avg(V**2)-avg(V)**2)/avg(V)
            GKappa1 = +1 * Beta**2 * avg(V**2) * deprod(V) / avg(V)**2
            GKappa2 = -1 * Beta**2 * avg(V) * deprod(V**2) / avg(V)**2
            GKappa3 = +1 * Beta**2 * covde(V)
            Kappa_grad[PT] = bar_unit*(GKappa1 + GKappa2 + GKappa3)
            ## Isobaric heat capacity.
            Cp_calc[PT] = 1000/(4.184*NMol*kT*T) * (avg(H**2) - avg(H)**2)
            if hasattr(self,'use_cvib_intra') and self.use_cvib_intra:
                logger.debug("Adding " + str(self.RefData['devib_intra'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_intra'][PT]
            if hasattr(self,'use_cvib_inter') and self.use_cvib_inter:
                logger.debug("Adding " + str(self.RefData['devib_inter'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_inter'][PT]
            GCp1 = 2*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            GCp2 = mBeta*covde(H**2) * 1000 / 4.184 / (NMol*kT*T)
            GCp3 = 2*Beta*avg(H)*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            Cp_grad[PT] = GCp1 + GCp2 + GCp3
            ## Static dielectric constant.
            prefactor = 30.348705333964077
            D2 = avg(Dx**2)+avg(Dy**2)+avg(Dz**2)-avg(Dx)**2-avg(Dy)**2-avg(Dz)**2
            Eps0_calc[PT] = 1.0 + prefactor*(D2/avg(V))/T
            GD2  = 2*(flat(np.matrix(GDx)*col(W*Dx)) - avg(Dx)*flat(np.matrix(GDx)*col(W))) - Beta*(covde(Dx**2) - 2*avg(Dx)*covde(Dx))
            GD2 += 2*(flat(np.matrix(GDy)*col(W*Dy)) - avg(Dy)*flat(np.matrix(GDy)*col(W))) - Beta*(covde(Dy**2) - 2*avg(Dy)*covde(Dy))
            GD2 += 2*(flat(np.matrix(GDz)*col(W*Dz)) - avg(Dz)*flat(np.matrix(GDz)*col(W))) - Beta*(covde(Dz**2) - 2*avg(Dz)*covde(Dz))
            Eps0_grad[PT] = prefactor*(GD2/avg(V) - mBeta*covde(V)*D2/avg(V)**2)/T
            ## Estimation of errors.
            Rho_std[PT]    = np.sqrt(sum(C**2 * np.array(Rho_errs)**2))
            if PT in mPoints:
                Hvap_std[PT]   = np.sqrt(sum(C**2 * np.array(Hvap_errs)**2))
            else:
                Hvap_std[PT]   = 0.0
            Alpha_std[PT]   = np.sqrt(sum(C**2 * np.array(Alpha_errs)**2)) * 1e4
            Kappa_std[PT]   = np.sqrt(sum(C**2 * np.array(Kappa_errs)**2)) * 1e6
            Cp_std[PT]   = np.sqrt(sum(C**2 * np.array(Cp_errs)**2))
            Eps0_std[PT]   = np.sqrt(sum(C**2 * np.array(Eps0_errs)**2))

        # Get contributions to the objective function
        X_Rho, G_Rho, H_Rho, RhoPrint = self.objective_term(Points, 'rho', Rho_calc, Rho_std, Rho_grad, name="Density")
        X_Hvap, G_Hvap, H_Hvap, HvapPrint = self.objective_term(Points, 'hvap', Hvap_calc, Hvap_std, Hvap_grad, name="H_vap", SubAverage=self.hvap_subaverage)
        X_Alpha, G_Alpha, H_Alpha, AlphaPrint = self.objective_term(Points, 'alpha', Alpha_calc, Alpha_std, Alpha_grad, name="Thermal Expansion")
        X_Kappa, G_Kappa, H_Kappa, KappaPrint = self.objective_term(Points, 'kappa', Kappa_calc, Kappa_std, Kappa_grad, name="Compressibility")
        X_Cp, G_Cp, H_Cp, CpPrint = self.objective_term(Points, 'cp', Cp_calc, Cp_std, Cp_grad, name="Heat Capacity")
        X_Eps0, G_Eps0, H_Eps0, Eps0Print = self.objective_term(Points, 'eps0', Eps0_calc, Eps0_std, Eps0_grad, name="Dielectric Constant")

        Gradient = np.zeros(self.FF.np)
        Hessian = np.zeros((self.FF.np,self.FF.np))

        if X_Rho == 0: self.w_rho = 0.0
        if X_Hvap == 0: self.w_hvap = 0.0
        if X_Alpha == 0: self.w_alpha = 0.0
        if X_Kappa == 0: self.w_kappa = 0.0
        if X_Cp == 0: self.w_cp = 0.0
        if X_Eps0 == 0: self.w_eps0 = 0.0

        w_tot = self.w_rho + self.w_hvap + self.w_alpha + self.w_kappa + self.w_cp + self.w_eps0
        w_1 = self.w_rho / w_tot
        w_2 = self.w_hvap / w_tot
        w_3 = self.w_alpha / w_tot
        w_4 = self.w_kappa / w_tot
        w_5 = self.w_cp / w_tot
        w_6 = self.w_eps0 / w_tot

        Objective    = w_1 * X_Rho + w_2 * X_Hvap + w_3 * X_Alpha + w_4 * X_Kappa + w_5 * X_Cp + w_6 * X_Eps0
        if AGrad:
            Gradient = w_1 * G_Rho + w_2 * G_Hvap + w_3 * G_Alpha + w_4 * G_Kappa + w_5 * G_Cp + w_6 * G_Eps0
        if AHess:
            Hessian  = w_1 * H_Rho + w_2 * H_Hvap + w_3 * H_Alpha + w_4 * H_Kappa + w_5 * H_Cp + w_6 * H_Eps0

        if not in_fd():
            self.Xp = {"Rho" : X_Rho, "Hvap" : X_Hvap, "Alpha" : X_Alpha, 
                           "Kappa" : X_Kappa, "Cp" : X_Cp, "Eps0" : X_Eps0}
            self.Wp = {"Rho" : w_1, "Hvap" : w_2, "Alpha" : w_3, 
                           "Kappa" : w_4, "Cp" : w_5, "Eps0" : w_6}
            self.Pp = {"Rho" : RhoPrint, "Hvap" : HvapPrint, "Alpha" : AlphaPrint, 
                           "Kappa" : KappaPrint, "Cp" : CpPrint, "Eps0" : Eps0Print}
            if AGrad:
                self.Gp = {"Rho" : G_Rho, "Hvap" : G_Hvap, "Alpha" : G_Alpha, 
                               "Kappa" : G_Kappa, "Cp" : G_Cp, "Eps0" : G_Eps0}
            self.Objective = Objective

        Answer = {'X':Objective, 'G':Gradient, 'H':Hessian}
        return Answer
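
The MBAR input assembled above is a matrix of reduced potentials, u[k, m, t] = beta_m * (E_k(t) + P_m * V_k(t) * pvkj): snapshots from simulation k re-evaluated at the temperature and pressure of state m. A standalone sketch with made-up state points and data:

import numpy as np

kb   = 0.0083144724712202          # Boltzmann constant in kJ/(mol K)
pvkj = 0.061019351687175           # atm * nm^3 -> kJ/mol
temps = [280.0, 300.0]             # hypothetical state temperatures (K)
press = [1.0, 1.0]                 # hypothetical state pressures (atm)
rng = np.random.default_rng(1)
E = rng.normal(-9000.0, 50.0, size=(2, 200))   # made-up energies per state, kJ/mol
V = rng.normal(6.5, 0.05, size=(2, 200))       # made-up volumes per state, nm^3
U_kln = np.zeros((2, 2, 200))
for m in range(2):
    beta = 1.0 / (kb * temps[m])
    for k in range(2):
        U_kln[k, m, :] = beta * (E[k] + press[m] * V[k] * pvkj)
# U_kln together with N_k = [200, 200] is what gets passed to pymbar.MBAR above.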
Example #40
0
File: lipid.py  Project: ebran/forcebalance
    def get(self, mvals, AGrad=True, AHess=True):
        
        """
        Fitting of lipid bulk properties.  This is the current major
        direction of development for ForceBalance.  Basically, fitting
        the QM energies / forces alone does not always give us the
        best simulation behavior.  In many cases it makes more sense
        to try and reproduce some experimentally known data as well.

        In order to reproduce experimentally known data, we need to
        run a simulation and compare the simulation result to
        experiment.  The main challenge here is that the simulations
        are computationally intensive (i.e. they require energy and
        force evaluations), and furthermore the results are noisy.  We
        need to run the simulations automatically and remotely
        (i.e. on clusters) and a good way to calculate the derivatives
        of the simulation results with respect to the parameter values.

        This function contains some experimentally known values of
        lipid bilayer properties such as the density, area per lipid,
        and deuterium order parameters.  It launches these calculations
        on the cluster and gathers the results / derivatives.  The
        actual calculation of results / derivatives is done in a
        separate file.

        After the results come back, they are gathered together to form
        an objective function.

        @param[in] mvals Mathematical parameter values
        @param[in] AGrad Switch to turn on analytic gradient
        @param[in] AHess Switch to turn on analytic Hessian
        @return Answer Contribution to the objective function
        
        """

        mbar_verbose = False

        Answer = {}

        Results = {}
        Points = []  # These are the phase points for which data exists.
        BPoints = [] # These are the phase points for which we are doing MBAR for the condensed phase.
        tt = 0
        for label, PT in zip(self.Labels, self.PhasePoints):
            if os.path.exists('./%s/npt_result.p.bz2' % label):
                _exec('bunzip2 ./%s/npt_result.p.bz2' % label, print_command=False)
            elif os.path.exists('./%s/npt_result.p' % label): pass
            else:
                logger.warning('In %s :\n' % os.getcwd())
                logger.warning('The file ./%s/npt_result.p.bz2 does not exist so we cannot unzip it\n' % label)
            if os.path.exists('./%s/npt_result.p' % label):
                logger.info('Reading information from ./%s/npt_result.p\n' % label)
                Points.append(PT)
                Results[tt] = lp_load(open('./%s/npt_result.p' % label))
                tt += 1
            else:
                logger.warning('The file ./%s/npt_result.p does not exist so we cannot read it\n' % label)
                pass
                # for obs in self.RefData:
                #     del self.RefData[obs][PT]
        if len(Points) == 0:
            raise Exception('The lipid simulations have terminated with \x1b[1;91mno readable data\x1b[0m - this is a problem!')

        # Assign variable names to all the stuff in npt_result.p
        Rhos, Vols, Potentials, Energies, Dips, Grads, GDips, \
            Rho_errs, Alpha_errs, Kappa_errs, Cp_errs, Eps0_errs, NMols, Als, Al_errs, Scds, Scd_errs = ([Results[t][i] for t in range(len(Points))] for i in range(17))
        # Determine the number of molecules
        if len(set(NMols)) != 1:
            logger.error(str(NMols))
            raise Exception('The above list should only contain one number - the number of molecules')
        else:
            NMol = list(set(NMols))[0]
    
        R  = np.array(list(itertools.chain(*list(Rhos))))
        V  = np.array(list(itertools.chain(*list(Vols))))
        E  = np.array(list(itertools.chain(*list(Energies))))
        Dx = np.array(list(itertools.chain(*list(d[:,0] for d in Dips))))
        Dy = np.array(list(itertools.chain(*list(d[:,1] for d in Dips))))
        Dz = np.array(list(itertools.chain(*list(d[:,2] for d in Dips))))
        G  = np.hstack(tuple(Grads))
        GDx = np.hstack(tuple(gd[0] for gd in GDips))
        GDy = np.hstack(tuple(gd[1] for gd in GDips))
        GDz = np.hstack(tuple(gd[2] for gd in GDips))
        A  = np.array(list(itertools.chain(*list(Als))))
        S  = np.array(list(itertools.chain(*list(Scds))))

        Rho_calc = OrderedDict([])
        Rho_grad = OrderedDict([])
        Rho_std  = OrderedDict([])
        Alpha_calc = OrderedDict([])
        Alpha_grad = OrderedDict([])
        Alpha_std  = OrderedDict([])
        Kappa_calc = OrderedDict([])
        Kappa_grad = OrderedDict([])
        Kappa_std  = OrderedDict([])
        Cp_calc = OrderedDict([])
        Cp_grad = OrderedDict([])
        Cp_std  = OrderedDict([])
        Eps0_calc = OrderedDict([])
        Eps0_grad = OrderedDict([])
        Eps0_std  = OrderedDict([])
        Al_calc = OrderedDict([])
        Al_grad = OrderedDict([])
        Al_std  = OrderedDict([])
        Scd_calc = OrderedDict([])
        Scd_grad = OrderedDict([])
        Scd_std  = OrderedDict([])

        # The unit that converts atmospheres * nm**3 into kJ/mol :)
        pvkj=0.061019351687175
 
        # Run MBAR using the total energies. Required for estimates that use the kinetic energy.
        BSims = len(BPoints)
        Shots = len(Energies[0])
        N_k = np.ones(BSims)*Shots
        # Use the value of the energy for snapshot t from simulation k at potential m
        U_kln = np.zeros([BSims,BSims,Shots])
        for m, PT in enumerate(BPoints):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            beta = 1. / (kb * T)
            for k in range(BSims):
                # The correct Boltzmann factors include PV.
                # Note that because the Boltzmann factors are computed from the conditions at simulation "m",
                # the pV terms must be rescaled to the pressure at simulation "m".
                kk = Points.index(BPoints[k])
                U_kln[k, m, :]   = Energies[kk] + P*Vols[kk]*pvkj
                U_kln[k, m, :]  *= beta
        W1 = None
        if len(BPoints) > 1:
            logger.info("Running MBAR analysis on %i states...\n" % len(BPoints))
            mbar = pymbar.MBAR(U_kln, N_k, verbose=mbar_verbose, relative_tolerance=5.0e-8)
            W1 = mbar.getWeights()
            logger.info("Done\n")
        elif len(BPoints) == 1:
            W1 = np.ones((len(BPoints)*Shots, len(BPoints)))
            W1 /= len(BPoints)*Shots
        
        def fill_weights(weights, phase_points, mbar_points, snapshots):
            """ Fill in the weight matrix with MBAR weights where MBAR was run, 
            and equal weights otherwise. """
            new_weights = np.zeros([len(phase_points)*snapshots,len(phase_points)])
            for m, PT in enumerate(phase_points):
                if PT in mbar_points:
                    mm = mbar_points.index(PT)
                    for kk, PT1 in enumerate(mbar_points):
                        k = phase_points.index(PT1)
                        logger.debug("Will fill W2[%i:%i,%i] with W1[%i:%i,%i]\n" % (k*snapshots,k*snapshots+snapshots,m,kk*snapshots,kk*snapshots+snapshots,mm))
                        new_weights[k*snapshots:(k+1)*snapshots,m] = weights[kk*snapshots:(kk+1)*snapshots,mm]
                else:
                    logger.debug("Will fill W2[%i:%i,%i] with equal weights\n" % (m*snapshots,(m+1)*snapshots,m))
                    new_weights[m*snapshots:(m+1)*snapshots,m] = 1.0/snapshots
            return new_weights
        
        W2 = fill_weights(W1, Points, BPoints, Shots)

        if self.do_self_pol:
            EPol = self.polarization_correction(mvals)
            GEPol = np.array([f12d3p(fdwrap(self.polarization_correction, mvals, p), h = self.h, f0 = EPol)[0] for p in range(self.FF.np)])
            bar = printcool("Self-polarization correction to \nenthalpy of vaporization is % .3f kJ/mol%s" % (EPol, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=GEPol)
                logger.info(bar)
            
        for i, PT in enumerate(Points):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            PV = P*V*pvkj
            H = E + PV
            # The weights that we want are the last ones.
            W = flat(W2[:,i])
            C = weight_info(W, PT, np.ones(len(Points))*Shots, verbose=mbar_verbose)
            Gbar = flat(np.mat(G)*col(W))
            mBeta = -1/kb/T
            Beta  = 1/kb/T
            kT    = kb*T
            # Define some things to make the analytic derivatives easier.
            def avg(vec):
                return np.dot(W,vec)
            def covde(vec):
                return flat(np.mat(G)*col(W*vec)) - avg(vec)*Gbar
            def deprod(vec):
                return flat(np.mat(G)*col(W*vec))
            ## Density.
            Rho_calc[PT]   = np.dot(W,R)
            Rho_grad[PT]   = mBeta*(flat(np.mat(G)*col(W*R)) - np.dot(W,R)*Gbar)
            ## Ignore enthalpy.
            ## Thermal expansion coefficient.
            Alpha_calc[PT] = 1e4 * (avg(H*V)-avg(H)*avg(V))/avg(V)/(kT*T)
            GAlpha1 = -1 * Beta * deprod(H*V) * avg(V) / avg(V)**2
            GAlpha2 = +1 * Beta * avg(H*V) * deprod(V) / avg(V)**2
            GAlpha3 = deprod(V)/avg(V) - Gbar
            GAlpha4 = Beta * covde(H)
            Alpha_grad[PT] = 1e4 * (GAlpha1 + GAlpha2 + GAlpha3 + GAlpha4)/(kT*T)
            ## Isothermal compressibility.
            bar_unit = 0.06022141793 * 1e6
            Kappa_calc[PT] = bar_unit / kT * (avg(V**2)-avg(V)**2)/avg(V)
            GKappa1 = +1 * Beta**2 * avg(V**2) * deprod(V) / avg(V)**2
            GKappa2 = -1 * Beta**2 * avg(V) * deprod(V**2) / avg(V)**2
            GKappa3 = +1 * Beta**2 * covde(V)
            Kappa_grad[PT] = bar_unit*(GKappa1 + GKappa2 + GKappa3)
            ## Isobaric heat capacity.
            Cp_calc[PT] = 1000/(4.184*NMol*kT*T) * (avg(H**2) - avg(H)**2)
            if hasattr(self,'use_cvib_intra') and self.use_cvib_intra:
                logger.debug("Adding " + str(self.RefData['devib_intra'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_intra'][PT]
            if hasattr(self,'use_cvib_inter') and self.use_cvib_inter:
                logger.debug("Adding " + str(self.RefData['devib_inter'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_inter'][PT]
            GCp1 = 2*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            GCp2 = mBeta*covde(H**2) * 1000 / 4.184 / (NMol*kT*T)
            GCp3 = 2*Beta*avg(H)*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            Cp_grad[PT] = GCp1 + GCp2 + GCp3
            ## Static dielectric constant.
            prefactor = 30.348705333964077
            D2 = avg(Dx**2)+avg(Dy**2)+avg(Dz**2)-avg(Dx)**2-avg(Dy)**2-avg(Dz)**2
            Eps0_calc[PT] = 1.0 + prefactor*(D2/avg(V))/T
            GD2  = 2*(flat(np.mat(GDx)*col(W*Dx)) - avg(Dx)*flat(np.mat(GDx)*col(W))) - Beta*(covde(Dx**2) - 2*avg(Dx)*covde(Dx))
            GD2 += 2*(flat(np.mat(GDy)*col(W*Dy)) - avg(Dy)*flat(np.mat(GDy)*col(W))) - Beta*(covde(Dy**2) - 2*avg(Dy)*covde(Dy))
            GD2 += 2*(flat(np.mat(GDz)*col(W*Dz)) - avg(Dz)*flat(np.mat(GDz)*col(W))) - Beta*(covde(Dz**2) - 2*avg(Dz)*covde(Dz))
            Eps0_grad[PT] = prefactor*(GD2/avg(V) - mBeta*covde(V)*D2/avg(V)**2)/T
            ## Average area per lipid
            Al_calc[PT]   = np.dot(W,A)
            Al_grad[PT]   = mBeta*(flat(np.mat(G)*col(W*A)) - np.dot(W,A)*Gbar)
            ## Deuterium order parameter
            Scd_calc[PT]   = np.dot(W,S)
            Scd_grad[PT]   = mBeta * (flat(np.average(np.mat(G) * (S * W[:, np.newaxis]), axis = 1)) - np.average(np.average(S * W[:, np.newaxis], axis = 0), axis = 0) * Gbar) 
            ## Estimation of errors.
            Rho_std[PT]    = np.sqrt(sum(C**2 * np.array(Rho_errs)**2))
            Alpha_std[PT]   = np.sqrt(sum(C**2 * np.array(Alpha_errs)**2)) * 1e4
            Kappa_std[PT]   = np.sqrt(sum(C**2 * np.array(Kappa_errs)**2)) * 1e6
            Cp_std[PT]   = np.sqrt(sum(C**2 * np.array(Cp_errs)**2))
            Eps0_std[PT]   = np.sqrt(sum(C**2 * np.array(Eps0_errs)**2))
            Al_std[PT]    = np.sqrt(sum(C**2 * np.array(Al_errs)**2))
            Scd_std[PT]    = np.sqrt(sum(np.mat(C**2) * np.array(Scd_errs)**2))

        # Get contributions to the objective function
        X_Rho, G_Rho, H_Rho, RhoPrint = self.objective_term(Points, 'rho', Rho_calc, Rho_std, Rho_grad, name="Density")
        X_Alpha, G_Alpha, H_Alpha, AlphaPrint = self.objective_term(Points, 'alpha', Alpha_calc, Alpha_std, Alpha_grad, name="Thermal Expansion")
        X_Kappa, G_Kappa, H_Kappa, KappaPrint = self.objective_term(Points, 'kappa', Kappa_calc, Kappa_std, Kappa_grad, name="Compressibility")
        X_Cp, G_Cp, H_Cp, CpPrint = self.objective_term(Points, 'cp', Cp_calc, Cp_std, Cp_grad, name="Heat Capacity")
        X_Eps0, G_Eps0, H_Eps0, Eps0Print = self.objective_term(Points, 'eps0', Eps0_calc, Eps0_std, Eps0_grad, name="Dielectric Constant")
        X_Al, G_Al, H_Al, AlPrint = self.objective_term(Points, 'al', Al_calc, Al_std, Al_grad, name="Avg Area per Lipid")
        X_Scd, G_Scd, H_Scd, ScdPrint = self.objective_term(Points, 'scd', Scd_calc, Scd_std, Scd_grad, name="Deuterium Order Parameter")

        Gradient = np.zeros(self.FF.np)
        Hessian = np.zeros((self.FF.np,self.FF.np))

        if X_Rho == 0: self.w_rho = 0.0
        if X_Alpha == 0: self.w_alpha = 0.0
        if X_Kappa == 0: self.w_kappa = 0.0
        if X_Cp == 0: self.w_cp = 0.0
        if X_Eps0 == 0: self.w_eps0 = 0.0
        if X_Al == 0: self.w_al = 0.0
        if X_Scd == 0: self.w_scd = 0.0

        w_tot = self.w_rho + self.w_alpha + self.w_kappa + self.w_cp + self.w_eps0 + self.w_al + self.w_scd
        w_1 = self.w_rho / w_tot
        w_3 = self.w_alpha / w_tot
        w_4 = self.w_kappa / w_tot
        w_5 = self.w_cp / w_tot
        w_6 = self.w_eps0 / w_tot
        w_7 = self.w_al / w_tot
        w_8 = self.w_scd / w_tot

        Objective    = w_1 * X_Rho + w_3 * X_Alpha + w_4 * X_Kappa + w_5 * X_Cp + w_6 * X_Eps0 + w_7 * X_Al + w_8 * X_Scd
        if AGrad:
            Gradient = w_1 * G_Rho + w_3 * G_Alpha + w_4 * G_Kappa + w_5 * G_Cp + w_6 * G_Eps0 + w_7 * G_Al + w_8 * G_Scd
        if AHess:
            Hessian  = w_1 * H_Rho + w_3 * H_Alpha + w_4 * H_Kappa + w_5 * H_Cp + w_6 * H_Eps0 + w_7 * H_Al + w_8 * H_Scd

        if not in_fd():
            self.Xp = {"Rho" : X_Rho, "Alpha" : X_Alpha, 
                           "Kappa" : X_Kappa, "Cp" : X_Cp, "Eps0" : X_Eps0, "Al" : X_Al, "Scd" : X_Scd}
            self.Wp = {"Rho" : w_1, "Alpha" : w_3, 
                           "Kappa" : w_4, "Cp" : w_5, "Eps0" : w_6, "Al" : w_7, "Scd" : w_8}
            self.Pp = {"Rho" : RhoPrint, "Alpha" : AlphaPrint, 
                           "Kappa" : KappaPrint, "Cp" : CpPrint, "Eps0" : Eps0Print, "Al" : AlPrint, "Scd" : ScdPrint}
            if AGrad:
                self.Gp = {"Rho" : G_Rho, "Alpha" : G_Alpha, 
                               "Kappa" : G_Kappa, "Cp" : G_Cp, "Eps0" : G_Eps0, "Al" : G_Al, "Scd" : G_Scd}
            self.Objective = Objective

        Answer = {'X':Objective, 'G':Gradient, 'H':Hessian}
        return Answer
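For context, the fill_weights helper defined above expands the MBAR weight matrix W1 (which only covers the phase points where MBAR was actually run) into the full matrix W2 over all phase points, giving equal weights to any point that was left out. A minimal sketch of how it behaves, assuming the helper (and its logger) were lifted to module scope and using made-up phase points and snapshot counts:

    import numpy as np
    # Hypothetical (temperature, pressure, unit) phase points in the same format as above.
    points  = [(298.15, 1.0, 'atm'), (323.15, 1.0, 'atm'), (348.15, 1.0, 'atm')]
    bpoints = points[:2]   # pretend MBAR was run for the first two points only
    shots   = 4            # snapshots per simulation
    W1 = np.full((len(bpoints)*shots, len(bpoints)), 1.0/(len(bpoints)*shots))
    W2 = fill_weights(W1, points, bpoints, shots)
    # W2 has shape (len(points)*shots, len(points)); the first two columns carry
    # the MBAR weights, while the third column holds 1/shots over its own block
    # of snapshots and zeros elsewhere.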
Example #41
0
    def driver(self):
        ## Actually run PSI4.
        if not in_fd() and CheckBasis():
            logger.info("Now checking for linear dependencies.\n")
            _exec("cp %s %s.bak" % (self.GBSfnm, self.GBSfnm),
                  print_command=False)
            ln0 = self.write_nested_destroy(self.GBSfnm,
                                            self.FF.linedestroy_save)
            o = wopen(".lindep.dat")
            for line in open(self.DATfnm).readlines():
                s = line.split("#")[0].split()
                if len(s) == 3 and s[0].lower() == 'basis' and s[1].lower(
                ) == 'file':
                    print("basis file %s" % self.GBSfnm, file=o)
                else:
                    print(line, end=' ', file=o)
            o.close()
            _exec("mv .lindep.dat %s" % self.DATfnm, print_command=False)
            _exec("psi4 %s" % self.DATfnm, print_command=False)
            LI = GBS_Reader()
            LI_lines = {}
            ## Read in the commented linindep.gbs file and ensure that these same lines are commented in the new .gbs file
            for line in open('linindep.gbs'):
                LI.feed(line, linindep=True)
                key = '.'.join([
                    str(i)
                    for i in (LI.element, LI.amom, LI.basis_number[LI.element],
                              LI.contraction_number)
                ])
                if LI.isdata:
                    if key in LI_lines:
                        logger.info("Duplicate key found:\n")
                        logger.info("%s\n" % key)
                        logger.info(str(LI_lines[key]))
                        logger.info(line)
                        warn_press_key(
                            "In %s, the LI_lines dictionary should not contain repeated keys!"
                            % __file__)
                    LI_lines[key] = (line, LI.destroy)
            ## Now build a "Frankenstein" .gbs file composed of the original .gbs file but with data from the linindep.gbs file!
            FK = GBS_Reader()
            FK_lines = []
            self.FF.linedestroy_this = []
            self.FF.prmdestroy_this = []
            for ln, line in enumerate(open(self.GBSfnm).readlines()):
                FK.feed(line)
                key = '.'.join([
                    str(i)
                    for i in (FK.element, FK.amom, FK.basis_number[FK.element],
                              FK.contraction_number)
                ])
                if FK.isdata and key in LI_lines:
                    if LI_lines[key][1]:
                        logger.info("Destroying line %i (originally %i): " %
                                    (ln, ln0[ln]))
                        logger.info(line)
                        self.FF.linedestroy_this.append(ln)
                        for p_destroy in [
                                i for i, fld in enumerate(self.FF.pfields)
                                if any([
                                    subfld[0] == self.GBSfnm
                                    and subfld[1] == ln0[ln] for subfld in fld
                                ])
                        ]:
                            logger.info(
                                "Destroying parameter %i located at line %i (originally %i) with fields given by: %s"
                                % (p_destroy, ln, ln0[ln],
                                   str(self.FF.pfields[p_destroy])))
                            self.FF.prmdestroy_this.append(p_destroy)
                    FK_lines.append(LI_lines[key][0])
                else:
                    FK_lines.append(line)
            o = wopen('franken.gbs')
            for line in FK_lines:
                print(line, end=' ', file=o)
            o.close()
            _exec("cp %s.bak %s" % (self.GBSfnm, self.GBSfnm),
                  print_command=False)

            if len(
                    list(
                        itertools.chain(*(self.FF.linedestroy_save +
                                          [self.FF.linedestroy_this])))) > 0:
                logger.info("All lines removed: " + self.FF.linedestroy_save +
                            [self.FF.linedestroy_this] + '\n')
                logger.info("All prms removed: " + self.FF.prmdestroy_save +
                            [self.FF.prmdestroy_this] + '\n')

        self.write_nested_destroy(
            self.GBSfnm, self.FF.linedestroy_save + [self.FF.linedestroy_this])
        _exec("psi4", print_command=False, outfnm="psi4.stdout")
        if not in_fd():
            for line in open('psi4.stdout').readlines():
                if "MP2 Energy:" in line:
                    self.MP2_Energy = float(line.split()[-1])
                elif "DF Energy:" in line:
                    self.DF_Energy = float(line.split()[-1])
        Ans = np.array([[float(i) for i in line.split()]
                        for line in open("objective.dat").readlines()])
        os.unlink("objective.dat")
        return Ans
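A side note on the last few lines: assuming objective.dat holds plain whitespace-separated numbers, the nested list comprehension that builds Ans is equivalent to a single numpy call:

    import numpy as np
    Ans = np.loadtxt("objective.dat", ndmin=2)  # one row per line, as in the parse above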
Example #42
0
 def interaction_driver_all(self, dielectric=False):
     """ Computes the energy and force using GROMACS for a trajectory.  This does not require GROMACS-X2. """
     # Remove backup files.
     rm_gmx_baks(os.getcwd())
     # Do the interacting calculation.
     _exec(["./grompp", "-f", "interaction.mdp", "-n", "index.ndx"], print_command=False)
     _exec(["./mdrun", "-nt", "1", "-rerunvsite", "-rerun", "all.gro"], print_command=False)
     # Gather information
     _exec(["./g_energy","-xvg","no"], print_command=False, stdin="Potential\n")
     Interact = array([float(l.split()[1]) for l in open('energy.xvg').readlines()])
     # Do the excluded calculation.
     _exec(["./grompp", "-f", "excluded.mdp", "-n", "index.ndx"], print_command=False)
     _exec(["./mdrun", "-nt", "1", "-rerunvsite", "-rerun", "all.gro"], print_command=False)
     # Gather information
     _exec(["./g_energy","-xvg","no"], print_command=False, stdin="Potential\n")
     Excluded = array([float(l.split()[1]) for l in open('energy.xvg').readlines()])
     # The interaction energy.
     M = Interact - Excluded
     # Now we have the MM interaction energy.
     # We need the COSMO component of the interaction energy now...
     if dielectric:
         traj_dimer = deepcopy(self.traj)
         traj_dimer.add_quantum("qtemp_D.in")
         traj_dimer.write("qchem_dimer.in",ftype="qcin")
         traj_monoA = deepcopy(self.traj)
         traj_monoA.add_quantum("qtemp_A.in")
         traj_monoA.write("qchem_monoA.in",ftype="qcin")
         traj_monoB = deepcopy(self.traj)
         traj_monoB.add_quantum("qtemp_B.in")
         traj_monoB.write("qchem_monoB.in",ftype="qcin")
         wq = getWorkQueue()
         if wq == None:
             warn_press_key("To proceed past this point, a Work Queue must be present")
         print "Computing the dielectric energy"
         Diel_D = QChem_Dielectric_Energy("qchem_dimer.in",wq)
         Diel_A = QChem_Dielectric_Energy("qchem_monoA.in",wq)
         # The dielectric energy for a water molecule should never change.
         if hasattr(self,"Diel_B"):
             Diel_B = self.Diel_B
         else:
             Diel_B = QChem_Dielectric_Energy("qchem_monoB.in",self.wq)
             self.Diel_B = Diel_B
         self.Dielectric = Diel_D - Diel_A - Diel_B
     M += self.Dielectric
     return M
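The interaction energy above is obtained by rerunning the same trajectory twice through GROMACS, once with interaction.mdp and once with excluded.mdp, and subtracting the per-frame potential energies. A small sketch of the energy.xvg parsing step, assuming the file was written with "-xvg no" so that each line is "time  potential" (the file names below are illustrative):

    import numpy as np
    def read_potentials(fnm):
        # Column 0 is the time stamp; column 1 is the requested Potential term.
        return np.loadtxt(fnm, ndmin=2)[:, 1]
    # M = read_potentials("energy_interact.xvg") - read_potentials("energy_excluded.xvg")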
Example #43
0
    def get(self, mvals, AGrad=True, AHess=True):
        
        """
        Fitting of liquid bulk properties.  This is the current major
        direction of development for ForceBalance.  Basically, fitting
        the QM energies / forces alone does not always give us the
        best simulation behavior.  In many cases it makes more sense
        to try and reproduce some experimentally known data as well.

        In order to reproduce experimentally known data, we need to
        run a simulation and compare the simulation result to
        experiment.  The main challenge here is that the simulations
        are computationally intensive (i.e. they require energy and
        force evaluations), and furthermore the results are noisy.  We
        need to run the simulations automatically and remotely
        (i.e. on clusters), and we need a good way to calculate the
        derivatives of the simulation results with respect to the parameter values.

        This function contains some experimentally known values of the
        density and enthalpy of vaporization (Hvap) of liquid water.
        It launches the density and Hvap calculations on the cluster,
        and gathers the results / derivatives.  The actual calculation
        of results / derivatives is done in a separate file.

        After the results come back, they are gathered together to form
        an objective function.

        @param[in] mvals Mathematical parameter values
        @param[in] AGrad Switch to turn on analytic gradient
        @param[in] AHess Switch to turn on analytic Hessian
        @return Answer Contribution to the objective function
        
        """

        Answer = {}

        Results = {}
        Points = []  # These are the phase points for which data exists.
        BPoints = [] # These are the phase points for which we are doing MBAR for the condensed phase.
        mBPoints = [] # These are the phase points for which we are doing MBAR for the monomers.
        mPoints = [] # These are the phase points to use for enthalpy of vaporization; if we're scanning pressure then set hvap_wt for higher pressures to zero.
        tt = 0
        for label, PT in zip(self.Labels, self.PhasePoints):
            if os.path.exists('./%s/npt_result.p.bz2' % label):
                _exec('bunzip2 ./%s/npt_result.p.bz2' % label)
            elif os.path.exists('./%s/npt_result.p' % label): pass
            else:
                logger.warning('In %s :\n' % os.getcwd())
                logger.warning('The file ./%s/npt_result.p.bz2 does not exist so we cannot unzip it\n' % label)
            if os.path.exists('./%s/npt_result.p' % label):
                logger.info('Reading information from ./%s/npt_result.p\n' % label)
                Points.append(PT)
                Results[tt] = lp_load(open('./%s/npt_result.p' % label))
                if 'hvap' in self.RefData and PT[0] not in [i[0] for i in mPoints]:
                    mPoints.append(PT)
                if 'mbar' in self.RefData and PT in self.RefData['mbar'] and self.RefData['mbar'][PT]:
                    BPoints.append(PT)
                    if 'hvap' in self.RefData and PT[0] not in [i[0] for i in mBPoints]:
                        mBPoints.append(PT)
                tt += 1
            else:
                logger.warning('The file ./%s/npt_result.p does not exist so we cannot read it\n' % label)
                pass
                # for obs in self.RefData:
                #     del self.RefData[obs][PT]
        if len(Points) == 0:
            raise Exception('The liquid simulations have terminated with \x1b[1;91mno readable data\x1b[0m - this is a problem!')

        # Assign variable names to all the stuff in npt_result.p
        Rhos, Vols, Potentials, Energies, Dips, Grads, GDips, mPotentials, mEnergies, mGrads, \
            Rho_errs, Hvap_errs, Alpha_errs, Kappa_errs, Cp_errs, Eps0_errs, NMols = ([Results[t][i] for t in range(len(Points))] for i in range(17))
        # Determine the number of molecules
        if len(set(NMols)) != 1:
            logger.error(str(NMols))
            raise Exception('The above list should only contain one number - the number of molecules')
        else:
            NMol = list(set(NMols))[0]
    
        R  = np.array(list(itertools.chain(*list(Rhos))))
        V  = np.array(list(itertools.chain(*list(Vols))))
        E  = np.array(list(itertools.chain(*list(Energies))))
        Dx = np.array(list(itertools.chain(*list(d[:,0] for d in Dips))))
        Dy = np.array(list(itertools.chain(*list(d[:,1] for d in Dips))))
        Dz = np.array(list(itertools.chain(*list(d[:,2] for d in Dips))))
        G  = np.hstack(tuple(Grads))
        GDx = np.hstack(tuple(gd[0] for gd in GDips))
        GDy = np.hstack(tuple(gd[1] for gd in GDips))
        GDz = np.hstack(tuple(gd[2] for gd in GDips))
        if len(mPoints) > 0:
            mE = np.array(list(itertools.chain(*list([i for pt, i in zip(Points,mEnergies) if pt in mPoints]))))
            mG = np.hstack(tuple([i for pt, i in zip(Points,mGrads) if pt in mPoints]))

        Rho_calc = OrderedDict([])
        Rho_grad = OrderedDict([])
        Rho_std  = OrderedDict([])
        Hvap_calc = OrderedDict([])
        Hvap_grad = OrderedDict([])
        Hvap_std  = OrderedDict([])
        Alpha_calc = OrderedDict([])
        Alpha_grad = OrderedDict([])
        Alpha_std  = OrderedDict([])
        Kappa_calc = OrderedDict([])
        Kappa_grad = OrderedDict([])
        Kappa_std  = OrderedDict([])
        Cp_calc = OrderedDict([])
        Cp_grad = OrderedDict([])
        Cp_std  = OrderedDict([])
        Eps0_calc = OrderedDict([])
        Eps0_grad = OrderedDict([])
        Eps0_std  = OrderedDict([])

        # The unit that converts atmospheres * nm**3 into kj/mol :)
        pvkj=0.061019351687175

        # Run MBAR using the total energies. Required for estimates that use the kinetic energy.
        BSims = len(BPoints)
        Shots = len(Energies[0])
        N_k = np.ones(BSims)*Shots
        # Use the value of the energy for snapshot t from simulation k at potential m
        U_kln = np.zeros([BSims,BSims,Shots], dtype = np.float64)
        for m, PT in enumerate(BPoints):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            beta = 1. / (kb * T)
            for k in range(BSims):
                # The correct Boltzmann factors include PV.
                # Note that because the Boltzmann factors are computed from the conditions at simulation "m",
                # the pV terms must be rescaled to the pressure at simulation "m".
                kk = Points.index(BPoints[k])
                U_kln[k, m, :]   = Energies[kk] + P*Vols[kk]*pvkj
                U_kln[k, m, :]  *= beta
        W1 = None
        if len(BPoints) > 1:
            logger.info("Running MBAR analysis on %i states...\n" % len(BPoints))
            mbar = pymbar.MBAR(U_kln, N_k, verbose=True, relative_tolerance=5.0e-8)
            W1 = mbar.getWeights()
            logger.info("Done\n")
        elif len(BPoints) == 1:
            W1 = np.ones((BSims*Shots, BSims), dtype=float)
            W1 /= BSims*Shots
        
        def fill_weights(weights, phase_points, mbar_points, snapshots):
            """ Fill in the weight matrix with MBAR weights where MBAR was run, 
            and equal weights otherwise. """
            new_weights = np.zeros([len(phase_points)*snapshots,len(phase_points)],dtype=np.float64)
            for m, PT in enumerate(phase_points):
                if PT in mbar_points:
                    mm = mbar_points.index(PT)
                    for kk, PT1 in enumerate(mbar_points):
                        k = phase_points.index(PT1)
                        logger.debug("Will fill W2[%i:%i,%i] with W1[%i:%i,%i]\n" % (k*snapshots,k*snapshots+snapshots,m,kk*snapshots,kk*snapshots+snapshots,mm))
                        new_weights[k*snapshots:(k+1)*snapshots,m] = weights[kk*snapshots:(kk+1)*snapshots,mm]
                else:
                    logger.debug("Will fill W2[%i:%i,%i] with equal weights\n" % (m*snapshots,(m+1)*snapshots,m))
                    new_weights[m*snapshots:(m+1)*snapshots,m] = 1.0/snapshots
            return new_weights
        
        W2 = fill_weights(W1, Points, BPoints, Shots)

        # Run MBAR on the monomers.  This is barely necessary.
        mW1 = None
        mShots = len(mEnergies[0])
        if len(mBPoints) > 1:
            mBSims = len(mBPoints)
            mN_k = np.ones(mBSims)*mShots
            mU_kln = np.zeros([mBSims,mBSims,mShots], dtype = np.float64)
            for m, PT in enumerate(mBPoints):
                T = PT[0]
                beta = 1. / (kb * T)
                for k in range(mBSims):
                    kk = Points.index(mBPoints[k])
                    mU_kln[k, m, :]  = mEnergies[kk]
                    mU_kln[k, m, :] *= beta
            if np.abs(np.std(mEnergies)) > 1e-6 and mBSims > 1:
                mmbar = pymbar.MBAR(mU_kln, mN_k, verbose=False, relative_tolerance=5.0e-8, method='self-consistent-iteration')
                mW1 = mmbar.getWeights()
        elif len(mBPoints) == 1:
            mBSims = 1
            mW1 = np.ones((mBSims*mShots, mBSims), dtype=float)
            mW1 /= mBSims*mShots

        mW2 = fill_weights(mW1, mPoints, mBPoints, mShots)
         
        if self.do_self_pol:
            EPol = self.polarization_correction(mvals)
            GEPol = np.array([f12d3p(fdwrap(self.polarization_correction, mvals, p), h = self.h, f0 = EPol)[0] for p in range(self.FF.np)])
            bar = printcool("Self-polarization correction to \nenthalpy of vaporization is % .3f kJ/mol%s" % (EPol, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=GEPol)
                logger.info(bar)
            
        for i, PT in enumerate(Points):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            PV = P*V*pvkj
            H = E + PV
            # The weights that we want are the last ones.
            W = flat(W2[:,i])
            C = weight_info(W, PT, np.ones(len(Points), dtype=np.float64)*Shots, verbose=True)
            Gbar = flat(np.mat(G)*col(W))
            mBeta = -1/kb/T
            Beta  = 1/kb/T
            kT    = kb*T
            # Define some things to make the analytic derivatives easier.
            def avg(vec):
                return np.dot(W,vec)
            def covde(vec):
                return flat(np.mat(G)*col(W*vec)) - avg(vec)*Gbar
            def deprod(vec):
                return flat(np.mat(G)*col(W*vec))
            ## Density.
            Rho_calc[PT]   = np.dot(W,R)
            Rho_grad[PT]   = mBeta*(flat(np.mat(G)*col(W*R)) - np.dot(W,R)*Gbar)
            ## Enthalpy of vaporization.
            if PT in mPoints:
                ii = mPoints.index(PT)
                mW = flat(mW2[:,ii])
                mGbar = flat(np.mat(mG)*col(mW))
                Hvap_calc[PT]  = np.dot(mW,mE) - np.dot(W,E)/NMol + kb*T - np.dot(W, PV)/NMol
                Hvap_grad[PT]  = mGbar + mBeta*(flat(np.mat(mG)*col(mW*mE)) - np.dot(mW,mE)*mGbar)
                Hvap_grad[PT] -= (Gbar + mBeta*(flat(np.mat(G)*col(W*E)) - np.dot(W,E)*Gbar)) / NMol
                Hvap_grad[PT] -= (mBeta*(flat(np.mat(G)*col(W*PV)) - np.dot(W,PV)*Gbar)) / NMol
                if self.do_self_pol:
                    Hvap_calc[PT] -= EPol
                    Hvap_grad[PT] -= GEPol
                if hasattr(self,'use_cni') and self.use_cni:
                    if not ('cni' in self.RefData and self.RefData['cni'][PT]):
                        raise RuntimeError('Asked for a nonideality correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.')
                    logger.info("Adding % .3f to enthalpy of vaporization at " % self.RefData['cni'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cni'][PT]
                if hasattr(self,'use_cvib_intra') and self.use_cvib_intra:
                    if not ('cvib_intra' in self.RefData and self.RefData['cvib_intra'][PT]):
                        raise RuntimeError('Asked for a quantum intramolecular vibrational correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.')
                    logger.info("Adding % .3f to enthalpy of vaporization at " % self.RefData['cvib_intra'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cvib_intra'][PT]
                if hasattr(self,'use_cvib_inter') and self.use_cvib_inter:
                    if not ('cvib_inter' in self.RefData and self.RefData['cvib_inter'][PT]):
                        raise RuntimeError('Asked for a quantum intermolecular vibrational correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.')
                    logger.info("Adding % .3f to enthalpy of vaporization at " % self.RefData['cvib_inter'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cvib_inter'][PT]
            else:
                Hvap_calc[PT]  = 0.0
                Hvap_grad[PT]  = np.zeros(self.FF.np,dtype=float)
            ## Thermal expansion coefficient.
            Alpha_calc[PT] = 1e4 * (avg(H*V)-avg(H)*avg(V))/avg(V)/(kT*T)
            GAlpha1 = -1 * Beta * deprod(H*V) * avg(V) / avg(V)**2
            GAlpha2 = +1 * Beta * avg(H*V) * deprod(V) / avg(V)**2
            GAlpha3 = deprod(V)/avg(V) - Gbar
            GAlpha4 = Beta * covde(H)
            Alpha_grad[PT] = 1e4 * (GAlpha1 + GAlpha2 + GAlpha3 + GAlpha4)/(kT*T)
            ## Isothermal compressibility.
            bar_unit = 0.06022141793 * 1e6
            Kappa_calc[PT] = bar_unit / kT * (avg(V**2)-avg(V)**2)/avg(V)
            GKappa1 = +1 * Beta**2 * avg(V**2) * deprod(V) / avg(V)**2
            GKappa2 = -1 * Beta**2 * avg(V) * deprod(V**2) / avg(V)**2
            GKappa3 = +1 * Beta**2 * covde(V)
            Kappa_grad[PT] = bar_unit*(GKappa1 + GKappa2 + GKappa3)
            ## Isobaric heat capacity.
            Cp_calc[PT] = 1000/(4.184*NMol*kT*T) * (avg(H**2) - avg(H)**2)
            if hasattr(self,'use_cvib_intra') and self.use_cvib_intra:
                logger.info("Adding " + str(self.RefData['devib_intra'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_intra'][PT]
            if hasattr(self,'use_cvib_inter') and self.use_cvib_inter:
                logger.info("Adding " + str(self.RefData['devib_inter'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_inter'][PT]
            GCp1 = 2*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            GCp2 = mBeta*covde(H**2) * 1000 / 4.184 / (NMol*kT*T)
            GCp3 = 2*Beta*avg(H)*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            Cp_grad[PT] = GCp1 + GCp2 + GCp3
            ## Static dielectric constant.
            prefactor = 30.348705333964077
            D2 = avg(Dx**2)+avg(Dy**2)+avg(Dz**2)-avg(Dx)**2-avg(Dy)**2-avg(Dz)**2
            Eps0_calc[PT] = 1.0 + prefactor*(D2/avg(V))/T
            GD2  = 2*(flat(np.mat(GDx)*col(W*Dx)) - avg(Dx)*flat(np.mat(GDx)*col(W))) - Beta*(covde(Dx**2) - 2*avg(Dx)*covde(Dx))
            GD2 += 2*(flat(np.mat(GDy)*col(W*Dy)) - avg(Dy)*flat(np.mat(GDy)*col(W))) - Beta*(covde(Dy**2) - 2*avg(Dy)*covde(Dy))
            GD2 += 2*(flat(np.mat(GDz)*col(W*Dz)) - avg(Dz)*flat(np.mat(GDz)*col(W))) - Beta*(covde(Dz**2) - 2*avg(Dz)*covde(Dz))
            Eps0_grad[PT] = prefactor*(GD2/avg(V) - mBeta*covde(V)*D2/avg(V)**2)/T
            ## Estimation of errors.
            Rho_std[PT]    = np.sqrt(sum(C**2 * np.array(Rho_errs)**2))
            if PT in mPoints:
                Hvap_std[PT]   = np.sqrt(sum(C**2 * np.array(Hvap_errs)**2))
            else:
                Hvap_std[PT]   = 0.0
            Alpha_std[PT]   = np.sqrt(sum(C**2 * np.array(Alpha_errs)**2)) * 1e4
            Kappa_std[PT]   = np.sqrt(sum(C**2 * np.array(Kappa_errs)**2)) * 1e6
            Cp_std[PT]   = np.sqrt(sum(C**2 * np.array(Cp_errs)**2))
            Eps0_std[PT]   = np.sqrt(sum(C**2 * np.array(Eps0_errs)**2))

        # Get contributions to the objective function
        X_Rho, G_Rho, H_Rho, RhoPrint = self.objective_term(Points, 'rho', Rho_calc, Rho_std, Rho_grad, name="Density")
        X_Hvap, G_Hvap, H_Hvap, HvapPrint = self.objective_term(Points, 'hvap', Hvap_calc, Hvap_std, Hvap_grad, name="H_vap", SubAverage=self.hvap_subaverage)
        X_Alpha, G_Alpha, H_Alpha, AlphaPrint = self.objective_term(Points, 'alpha', Alpha_calc, Alpha_std, Alpha_grad, name="Thermal Expansion")
        X_Kappa, G_Kappa, H_Kappa, KappaPrint = self.objective_term(Points, 'kappa', Kappa_calc, Kappa_std, Kappa_grad, name="Compressibility")
        X_Cp, G_Cp, H_Cp, CpPrint = self.objective_term(Points, 'cp', Cp_calc, Cp_std, Cp_grad, name="Heat Capacity")
        X_Eps0, G_Eps0, H_Eps0, Eps0Print = self.objective_term(Points, 'eps0', Eps0_calc, Eps0_std, Eps0_grad, name="Dielectric Constant")

        Gradient = np.zeros(self.FF.np, dtype=float)
        Hessian = np.zeros((self.FF.np,self.FF.np),dtype=float)

        if X_Rho == 0: self.w_rho = 0.0
        if X_Hvap == 0: self.w_hvap = 0.0
        if X_Alpha == 0: self.w_alpha = 0.0
        if X_Kappa == 0: self.w_kappa = 0.0
        if X_Cp == 0: self.w_cp = 0.0
        if X_Eps0 == 0: self.w_eps0 = 0.0

        w_tot = self.w_rho + self.w_hvap + self.w_alpha + self.w_kappa + self.w_cp + self.w_eps0
        w_1 = self.w_rho / w_tot
        w_2 = self.w_hvap / w_tot
        w_3 = self.w_alpha / w_tot
        w_4 = self.w_kappa / w_tot
        w_5 = self.w_cp / w_tot
        w_6 = self.w_eps0 / w_tot

        Objective    = w_1 * X_Rho + w_2 * X_Hvap + w_3 * X_Alpha + w_4 * X_Kappa + w_5 * X_Cp + w_6 * X_Eps0
        if AGrad:
            Gradient = w_1 * G_Rho + w_2 * G_Hvap + w_3 * G_Alpha + w_4 * G_Kappa + w_5 * G_Cp + w_6 * G_Eps0
        if AHess:
            Hessian  = w_1 * H_Rho + w_2 * H_Hvap + w_3 * H_Alpha + w_4 * H_Kappa + w_5 * H_Cp + w_6 * H_Eps0

        PrintDict = OrderedDict()
        if X_Rho > 0:
            printcool_dictionary(RhoPrint, title='%s Density (kg m^-3) \nTemperature  Pressure  Reference  Calculated +- Stdev     Delta    Weight    Term   ' % self.name,bold=True,color=4,keywidth=15)
            bar = printcool("Density objective function: % .3f%s" % (X_Rho, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=G_Rho)
                logger.info(bar)
            PrintDict['Density'] = "% 10.5f % 8.3f % 14.5e" % (X_Rho, w_1, X_Rho*w_1)

        if X_Hvap > 0:
            printcool_dictionary(HvapPrint, title='%s Enthalpy of Vaporization (kJ mol^-1) \nTemperature  Pressure  Reference  Calculated +- Stdev     Delta    Weight    Term   ' % self.name,bold=True,color=4,keywidth=15)
            bar = printcool("H_vap objective function: % .3f%s" % (X_Hvap, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=G_Hvap)
                logger.info(bar)
                
            PrintDict['Enthalpy of Vaporization'] = "% 10.5f % 8.3f % 14.5e" % (X_Hvap, w_2, X_Hvap*w_2)

        if X_Alpha > 0:
            printcool_dictionary(AlphaPrint,title='%s Thermal Expansion Coefficient (10^-4 K^-1) \nTemperature  Pressure  Reference  Calculated +- Stdev     Delta    Weight    Term   ' % self.name,bold=True,color=4,keywidth=15)
            bar = printcool("Thermal Expansion objective function: % .3f%s" % (X_Alpha, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=G_Alpha)
                logger.info(bar)

            PrintDict['Thermal Expansion Coefficient'] = "% 10.5f % 8.3f % 14.5e" % (X_Alpha, w_3, X_Alpha*w_3)

        if X_Kappa > 0:
            printcool_dictionary(KappaPrint,title='%s Isothermal Compressibility (10^-6 bar^-1) \nTemperature  Pressure  Reference  Calculated +- Stdev     Delta    Weight    Term   ' % self.name,bold=True,color=4,keywidth=15)
            bar = printcool("Compressibility objective function: % .3f%s" % (X_Kappa, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=G_Kappa)
                logger.info(bar)
                
            PrintDict['Isothermal Compressibility'] = "% 10.5f % 8.3f % 14.5e" % (X_Kappa, w_4, X_Kappa*w_4)

        if X_Cp > 0:
            printcool_dictionary(CpPrint,   title='%s Isobaric Heat Capacity (cal mol^-1 K^-1) \nTemperature  Pressure  Reference  Calculated +- Stdev     Delta    Weight    Term   ' % self.name,bold=True,color=4,keywidth=15)
            bar = printcool("Heat Capacity objective function: % .3f%s" % (X_Cp, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=G_Cp)
                logger.info(bar)

            PrintDict['Isobaric Heat Capacity'] = "% 10.5f % 8.3f % 14.5e" % (X_Cp, w_5, X_Cp*w_5)

        if X_Eps0 > 0:
            printcool_dictionary(Eps0Print,   title='%s Dielectric Constant\nTemperature  Pressure  Reference  Calculated +- Stdev     Delta    Weight    Term   ' % self.name,bold=True,color=4,keywidth=15)
            bar = printcool("Dielectric Constant objective function: % .3f%s" % (X_Eps0, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=G_Eps0)
                logger.info(bar)

            PrintDict['Dielectric Constant'] = "% 10.5f % 8.3f % 14.5e" % (X_Eps0, w_6, X_Eps0*w_6)

        PrintDict['Total'] = "% 10s % 8s % 14.5e" % ("","",Objective)

        Title = "%s Condensed Phase Properties:\n %-20s %40s" % (self.name, "Property Name", "Residual x Weight = Contribution")
        printcool_dictionary(PrintDict,color=4,title=Title,keywidth=31)

        Answer = {'X':Objective, 'G':Gradient, 'H':Hessian}
        return Answer
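The tail end of this example shows the bookkeeping for the combined objective: any property whose term X is zero has its weight zeroed out, the remaining weights are renormalized to sum to one, and the objective (plus the gradient and Hessian when AGrad/AHess are set) is the weighted sum of the per-property contributions. A standalone sketch of that logic with made-up property names and values:

    # Hypothetical per-property objective terms and user-specified weights.
    terms   = {'rho': 2.31, 'hvap': 0.0, 'alpha': 1.07}
    weights = {'rho': 1.0,  'hvap': 1.0, 'alpha': 0.5}
    for prop, x in terms.items():
        if x == 0:
            weights[prop] = 0.0          # a zero term contributes nothing, so drop its weight
    w_tot = sum(weights.values())
    objective = sum(weights[p] / w_tot * terms[p] for p in terms)
    # objective == (1.0*2.31 + 0.5*1.07) / 1.5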
Example #44
0
                if FK.isdata and key in LI_lines:
                    if LI_lines[key][1]:
                        logger.info("Destroying line %i (originally %i): " % (ln, ln0[ln]))
                        logger.info(line)
                        self.FF.linedestroy_this.append(ln)
                        for p_destroy in [i for i, fld in enumerate(self.FF.pfields) if any([subfld[0] == self.GBSfnm and subfld[1] == ln0[ln] for subfld in fld])]:
                            logger.info("Destroying parameter %i located at line %i (originally %i) with fields given by: %s" % (p_destroy, ln, ln0[ln], str(self.FF.pfields[p_destroy])))
                            self.FF.prmdestroy_this.append(p_destroy)
                    FK_lines.append(LI_lines[key][0])
                else:
                    FK_lines.append(line)
            o = wopen('franken.gbs')
            for line in FK_lines:
                print(line, end=' ', file=o)
            o.close()
            _exec("cp %s.bak %s" % (self.GBSfnm, self.GBSfnm), print_command=False)
            
            if len(list(itertools.chain(*(self.FF.linedestroy_save + [self.FF.linedestroy_this])))) > 0:
                logger.info("All lines removed: " + self.FF.linedestroy_save + [self.FF.linedestroy_this] + '\n')
                logger.info("All prms removed: " + self.FF.prmdestroy_save + [self.FF.prmdestroy_this] + '\n')

        self.write_nested_destroy(self.GBSfnm, self.FF.linedestroy_save + [self.FF.linedestroy_this])
        _exec("psi4", print_command=False, outfnm="psi4.stdout")
        if not in_fd():
            for line in open('psi4.stdout').readlines():
                if "MP2 Energy:" in line:
                    self.MP2_Energy = float(line.split()[-1])
                elif "DF Energy:" in line:
                    self.DF_Energy = float(line.split()[-1])
        Ans = np.array([[float(i) for i in line.split()] for line in open("objective.dat").readlines()])
        os.unlink("objective.dat")