Example #1
import os
import sys
import subprocess as sp

import numpy as np
import pymbar  # pymbar 3.x API (MBAR, getFreeEnergyDifferences)
from pymbar import timeseries  # provides subsampleCorrelatedData


def fe_dd(comp, pose, mode, lambdas, weights, dd_type, rest_file, temperature):

    kB = 1.381e-23 * 6.022e23 / (4.184 * 1000.0)  # Boltzmann constant in kcal/mol/K
    beta = 1 / (kB * temperature)  # inverse temperature
    N_max = 20000  # Maximum number of frames per simulation window; increase this if you ran longer simulations

    os.chdir('fe')
    os.chdir(pose)
    os.chdir('dd')
    if comp == 'f' or comp == 'w':
        os.chdir('bulk')
    elif comp == 'v' or comp == 'e':
        os.chdir('site')
    if not os.path.exists('data'):
        os.makedirs('data')

    # Define log file
    sys.stdout = open('./data/' + dd_type + '-' + comp + '-' + mode + '.dat',
                      'w')

    ### Determine Number of windows
    K = 0
    filename = './' + comp + '%02.0f/%s' % (K, rest_file)
    while os.path.isfile(filename):
        K = K + 1
        filename = './' + comp + '%02.0f/%s' % (K, rest_file)

    if dd_type == 'ti':
        deltag = 0
        dvdl = []
        for k in range(K):
            data = []
            # Read in Values for restrained variables for each simulation
            filename = './' + comp + '%02.0f/%s' % (k, rest_file)
            infile = open(filename, 'r')
            restdat = infile.readlines()  # read all frames (optionally skip equilibration, e.g. readlines()[20:])
            infile.close()
            # Parse Data
            for line in restdat:
                data.append(float(line.split()[1]))
            dvdl.append(float(sum(data) / len(data)))

        for i in range(0, len(dvdl)):
            print('%-10s%6.5f,  %-8s%9.5f' %
                  ('lambda =', float(lambdas[i]), 'dvdl =', float(dvdl[i])))

        for i in range(K):
            deltag = deltag + dvdl[i] * weights[i]

        print('\n%-8s %9.5f' % ('deltaG  ', float(deltag)))
    elif dd_type == 'mbar':

        ### Allocate storage for simulation data
        N = np.zeros(
            [K], np.int32
        )  # N_k[k] is the number of snapshots to be used from umbrella simulation k
        Neff = np.zeros([K], np.int32)
        Nind = np.zeros([K], np.int32)
        val = np.zeros(
            [N_max, K, K],
            np.float64)  # value of the restrained variable at each frame n
        g = np.zeros([K], np.float64)
        u = np.zeros([N_max], np.float64)

        ### Calculate Statistical Inefficiency (g)
        def calcg(data):
            sum = 0
            randnum = ("%05.0f" % (int(100000 * np.random.random())))
            datafn = '/dev/shm/series.' + randnum + '.dat'
            acffn = '/dev/shm/acf.' + randnum + '.dat'
            cppfn = '/dev/shm/pt-acf.' + randnum + '.in'
            np.savetxt(datafn, data)
            cpptin = open(cppfn, 'w')
            cpptin.write("readdata " + datafn + " name " + randnum +
                         "\nautocorr " + randnum + " out " + acffn +
                         " noheader\n")
            cpptin.close()

            FNULL = open(os.devnull, 'w')
            sp.call(['cpptraj', '-i', cppfn], stdout=FNULL, stderr=sp.STDOUT)

            with open(acffn, 'r') as acf:
                for line in acf:
                    col = line.split()
                    t = float(col[0]) - 1.0
            T = t

            with open(acffn, 'r') as acf:
                for line in acf:
                    col = line.split()
                    t = float(col[0]) - 1.0
                    v = float(col[1])
                    if t == 0:
                        continue
                    if v < 0.0:
                        break
                    sum += (1 - (t / T)) * (v)

            sp.call(['rm', datafn, acffn, cppfn])

            return 1 + (2 * sum)

        for k in range(K):
            # Read in Values for restrained variables for each simulation
            filename = './' + comp + '%02.0f/%s' % (k, rest_file)
            infile = open(filename, 'r')
            restdat = infile.readlines()  # read all frames (optionally skip equilibration, e.g. readlines()[20:])
            infile.close()
            # Parse Data
            n = 0
            lambdas = []
            for line in restdat:
                cols = line.split()
                if len(cols) >= 1:
                    lambdas.append(float(cols[1]))
                if len(cols) == 0:
                    break
            for line in restdat:
                cols = line.split()
                if len(cols) >= 1:
                    if '**' not in cols[2]:
                        lamb = float(cols[1].strip())
                        val[n, k, lambdas.index(lamb)] = float(cols[2])
                if len(cols) == 0:
                    n += 1
            N[k] = n

            # Calculate reduced potential
            u[0:N[k]] = beta * (val[0:N[k], k, k])

            # Subsample or not
            if mode == 'sub':
                g[k] = calcg(u[0:N[k]])
                subs = timeseries.subsampleCorrelatedData(np.zeros([N[k]]),
                                                          g=g[k])
                Nind[k] = len(subs)
                Neff[k] = Nind[k]
            else:
                g[k] = 1.00
                Neff[k] = N[k]

            print "Processed Window %5.0f.  N= %12.0f.  g= %10.3f   Neff= %12.0f" % (
                k, N[k], g[k], Neff[k])

        # Calculate decoupling energy
        Upot = np.zeros([K, K, np.max(Neff)], np.float64)
        for k in range(K):
            for l in range(K):
                Upot[k, l, 0:Neff[k]] = beta * (val[0:Neff[k], k, l])

        val = []

        print "\nRunning MBAR... "
        mbar = pymbar.MBAR(Upot,
                           Neff,
                           verbose=True,
                           method='adaptive',
                           initialize='BAR')

        print "Calculate Free Energy Differences Between States"
        [Deltaf, dDeltaf] = mbar.getFreeEnergyDifferences()

        min = np.argmin(Deltaf[0])

        # Write to file
        print "\nFree Energy Differences (in units of kcal/mol)"
        print "%9s %8s %8s" % ('lambda', 'f', 'df')
        for k in range(K):
            print "%10.5f %10.5f %10.5f" % (lambdas[k], Deltaf[0, k] / beta,
                                            dDeltaf[0, k] / beta)
        print "\n\n"

    os.chdir('../../../../')
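
A minimal call sketch for fe_dd is shown below. It assumes the fe/<pose>/dd/{site,bulk}/<comp>NN/ window directories and their per-window output files (rest_file) already exist from the production runs; the pose label, file name, temperature, lambda schedule and weights are illustrative placeholders, not values from any particular setup.

# Illustrative, evenly spaced lambda windows and matching trapezoidal-rule weights;
# the number of window directories found on disk must match the length of these lists.
lambdas = [0.0, 0.25, 0.5, 0.75, 1.0]
weights = [0.125, 0.25, 0.25, 0.25, 0.125]

# Thermodynamic integration for the 'e' (binding-site) decoupling windows of a hypothetical pose
fe_dd('e', 'pose0', 'all', lambdas, weights, 'ti', 'ti.dat', 298.15)

# MBAR with autocorrelation-based subsampling ('sub') for the same windows
fe_dd('e', 'pose0', 'sub', lambdas, weights, 'mbar', 'ti.dat', 298.15)
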
Example #2
    def get(self, mvals, AGrad=True, AHess=True):
        """
        Fitting of lipid bulk properties.  This is the current major
        direction of development for ForceBalance.  Basically, fitting
        the QM energies / forces alone does not always give us the
        best simulation behavior.  In many cases it makes more sense
        to try and reproduce some experimentally known data as well.

        In order to reproduce experimentally known data, we need to
        run a simulation and compare the simulation result to
        experiment.  The main challenge here is that the simulations
        are computationally intensive (i.e. they require energy and
        force evaluations), and furthermore the results are noisy.  We
        need to run the simulations automatically and remotely
        (i.e. on clusters), and we need a good way to calculate the
        derivatives of the simulation results with respect to the
        parameter values.

        This function contains some experimentally known values of
        lipid bulk properties (density, area per lipid, deuterium
        order parameters, and so on).  It launches the corresponding
        calculations on the cluster and gathers the results /
        derivatives.  The actual calculation of results / derivatives
        is done in a separate file.

        After the results come back, they are gathered together to form
        an objective function.

        @param[in] mvals Mathematical parameter values
        @param[in] AGrad Switch to turn on analytic gradient
        @param[in] AHess Switch to turn on analytic Hessian
        @return Answer Contribution to the objective function
        
        """

        mbar_verbose = False

        Answer = {}

        Results = {}
        Points = []  # These are the phase points for which data exists.
        BPoints = []  # These are the phase points for which we are doing MBAR for the condensed phase.
        tt = 0
        for label, PT in zip(self.Labels, self.PhasePoints):
            if os.path.exists('./%s/npt_result.p' % label):
                logger.info('Reading information from ./%s/npt_result.p\n' %
                            label)
                Points.append(PT)
                Results[tt] = lp_load('./%s/npt_result.p' % label)
                tt += 1
            else:
                logger.warning(
                    'The file ./%s/npt_result.p does not exist so we cannot read it\n'
                    % label)
                pass
                # for obs in self.RefData:
                #     del self.RefData[obs][PT]
        if len(Points) == 0:
            logger.error(
                'The lipid simulations have terminated with \x1b[1;91mno readable data\x1b[0m - this is a problem!\n'
            )
            raise RuntimeError

        # Assign variable names to all the stuff in npt_result.p
        Rhos, Vols, Potentials, Energies, Dips, Grads, GDips, \
            Rho_errs, Alpha_errs, Kappa_errs, Cp_errs, Eps0_errs, NMols, Als, Al_errs, Scds, Scd_errs = ([Results[t][i] for t in range(len(Points))] for i in range(17))
        # Determine the number of molecules
        if len(set(NMols)) != 1:
            logger.error(str(NMols))
            logger.error(
                'The above list should only contain one number - the number of molecules\n'
            )
            raise RuntimeError
        else:
            NMol = list(set(NMols))[0]

        R = np.array(list(itertools.chain(*list(Rhos))))
        V = np.array(list(itertools.chain(*list(Vols))))
        E = np.array(list(itertools.chain(*list(Energies))))
        Dx = np.array(list(itertools.chain(*list(d[:, 0] for d in Dips))))
        Dy = np.array(list(itertools.chain(*list(d[:, 1] for d in Dips))))
        Dz = np.array(list(itertools.chain(*list(d[:, 2] for d in Dips))))
        G = np.hstack(tuple(Grads))
        GDx = np.hstack(tuple(gd[0] for gd in GDips))
        GDy = np.hstack(tuple(gd[1] for gd in GDips))
        GDz = np.hstack(tuple(gd[2] for gd in GDips))
        A = np.array(list(itertools.chain(*list(Als))))
        S = np.array(list(itertools.chain(*list(Scds))))

        Rho_calc = OrderedDict([])
        Rho_grad = OrderedDict([])
        Rho_std = OrderedDict([])
        Alpha_calc = OrderedDict([])
        Alpha_grad = OrderedDict([])
        Alpha_std = OrderedDict([])
        Kappa_calc = OrderedDict([])
        Kappa_grad = OrderedDict([])
        Kappa_std = OrderedDict([])
        Cp_calc = OrderedDict([])
        Cp_grad = OrderedDict([])
        Cp_std = OrderedDict([])
        Eps0_calc = OrderedDict([])
        Eps0_grad = OrderedDict([])
        Eps0_std = OrderedDict([])
        Al_calc = OrderedDict([])
        Al_grad = OrderedDict([])
        Al_std = OrderedDict([])
        Scd_calc = OrderedDict([])
        Scd_grad = OrderedDict([])
        Scd_std = OrderedDict([])

        # The unit that converts atmospheres * nm**3 into kJ/mol :)
        pvkj = 0.061019351687175

        # Run MBAR using the total energies. Required for estimates that use the kinetic energy.
        BSims = len(BPoints)
        Shots = len(Energies[0])
        N_k = np.ones(BSims) * Shots
        # Use the value of the energy for snapshot t from simulation k at potential m
        U_kln = np.zeros([BSims, BSims, Shots])
        for m, PT in enumerate(BPoints):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            beta = 1. / (kb * T)
            for k in range(BSims):
                # The correct Boltzmann factors include PV.
                # Note that because the Boltzmann factors are computed from the conditions at simulation "m",
                # the pV terms must be rescaled to the pressure at simulation "m".
                kk = Points.index(BPoints[k])
                U_kln[k, m, :] = Energies[kk] + P * Vols[kk] * pvkj
                U_kln[k, m, :] *= beta
        W1 = None
        if len(BPoints) > 1:
            logger.info("Running MBAR analysis on %i states...\n" %
                        len(BPoints))
            mbar = pymbar.MBAR(U_kln,
                               N_k,
                               verbose=mbar_verbose,
                               relative_tolerance=5.0e-8)
            W1 = mbar.getWeights()
            logger.info("Done\n")
        elif len(BPoints) == 1:
            W1 = np.ones((len(BPoints) * Shots, len(BPoints)))
            W1 /= len(BPoints) * Shots

        def fill_weights(weights, phase_points, mbar_points, snapshots):
            """ Fill in the weight matrix with MBAR weights where MBAR was run, 
            and equal weights otherwise. """
            new_weights = np.zeros(
                [len(phase_points) * snapshots,
                 len(phase_points)])
            for m, PT in enumerate(phase_points):
                if PT in mbar_points:
                    mm = mbar_points.index(PT)
                    for kk, PT1 in enumerate(mbar_points):
                        k = phase_points.index(PT1)
                        logger.debug(
                            "Will fill W2[%i:%i,%i] with W1[%i:%i,%i]\n" %
                            (k * snapshots, k * snapshots + snapshots, m,
                             kk * snapshots, kk * snapshots + snapshots, mm))
                        new_weights[k * snapshots:(k + 1) * snapshots,
                                    m] = weights[kk * snapshots:(kk + 1) *
                                                 snapshots, mm]
                else:
                    logger.debug(
                        "Will fill W2[%i:%i,%i] with equal weights\n" %
                        (m * snapshots, (m + 1) * snapshots, m))
                    new_weights[m * snapshots:(m + 1) * snapshots,
                                m] = 1.0 / snapshots
            return new_weights

        W2 = fill_weights(W1, Points, BPoints, Shots)

        if self.do_self_pol:
            EPol = self.polarization_correction(mvals)
            GEPol = np.array([
                (f12d3p(fdwrap(self.polarization_correction, mvals, p),
                        h=self.h,
                        f0=EPol)[0] if p in self.pgrad else 0.0)
                for p in range(self.FF.np)
            ])
            bar = printcool(
                "Self-polarization correction to \nenthalpy of vaporization is % .3f kJ/mol%s"
                % (EPol, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=GEPol)
                logger.info(bar)

        for i, PT in enumerate(Points):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            PV = P * V * pvkj
            H = E + PV
            # The weights that we want are the last ones.
            W = flat(W2[:, i])
            C = weight_info(W,
                            PT,
                            np.ones(len(Points)) * Shots,
                            verbose=mbar_verbose)
            Gbar = flat(np.mat(G) * col(W))
            mBeta = -1 / kb / T
            Beta = 1 / kb / T
            kT = kb * T

            # Define some things to make the analytic derivatives easier.
            def avg(vec):
                return np.dot(W, vec)

            def covde(vec):
                return flat(np.mat(G) * col(W * vec)) - avg(vec) * Gbar

            def deprod(vec):
                return flat(np.mat(G) * col(W * vec))

            ## Density.
            Rho_calc[PT] = np.dot(W, R)
            Rho_grad[PT] = mBeta * (flat(np.mat(G) * col(W * R)) -
                                    np.dot(W, R) * Gbar)
            ## Ignore enthalpy.
            ## Thermal expansion coefficient.
            Alpha_calc[PT] = 1e4 * (avg(H * V) -
                                    avg(H) * avg(V)) / avg(V) / (kT * T)
            GAlpha1 = -1 * Beta * deprod(H * V) * avg(V) / avg(V)**2
            GAlpha2 = +1 * Beta * avg(H * V) * deprod(V) / avg(V)**2
            GAlpha3 = deprod(V) / avg(V) - Gbar
            GAlpha4 = Beta * covde(H)
            Alpha_grad[PT] = 1e4 * (GAlpha1 + GAlpha2 + GAlpha3 +
                                    GAlpha4) / (kT * T)
            ## Isothermal compressibility.
            bar_unit = 0.06022141793 * 1e6
            Kappa_calc[PT] = bar_unit / kT * (avg(V**2) - avg(V)**2) / avg(V)
            GKappa1 = +1 * Beta**2 * avg(V**2) * deprod(V) / avg(V)**2
            GKappa2 = -1 * Beta**2 * avg(V) * deprod(V**2) / avg(V)**2
            GKappa3 = +1 * Beta**2 * covde(V)
            Kappa_grad[PT] = bar_unit * (GKappa1 + GKappa2 + GKappa3)
            ## Isobaric heat capacity.
            Cp_calc[PT] = 1000 / (4.184 * NMol * kT * T) * (avg(H**2) -
                                                            avg(H)**2)
            if hasattr(self, 'use_cvib_intra') and self.use_cvib_intra:
                logger.debug("Adding " + str(self.RefData['devib_intra'][PT]) +
                             " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_intra'][PT]
            if hasattr(self, 'use_cvib_inter') and self.use_cvib_inter:
                logger.debug("Adding " + str(self.RefData['devib_inter'][PT]) +
                             " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_inter'][PT]
            GCp1 = 2 * covde(H) * 1000 / 4.184 / (NMol * kT * T)
            GCp2 = mBeta * covde(H**2) * 1000 / 4.184 / (NMol * kT * T)
            GCp3 = 2 * Beta * avg(H) * covde(H) * 1000 / 4.184 / (NMol * kT *
                                                                  T)
            Cp_grad[PT] = GCp1 + GCp2 + GCp3
            ## Static dielectric constant.
            prefactor = 30.348705333964077
            D2 = avg(Dx**2) + avg(Dy**2) + avg(
                Dz**2) - avg(Dx)**2 - avg(Dy)**2 - avg(Dz)**2
            Eps0_calc[PT] = 1.0 + prefactor * (D2 / avg(V)) / T
            GD2 = 2 * (flat(np.mat(GDx) * col(W * Dx)) -
                       avg(Dx) * flat(np.mat(GDx) * col(W))) - Beta * (
                           covde(Dx**2) - 2 * avg(Dx) * covde(Dx))
            GD2 += 2 * (flat(np.mat(GDy) * col(W * Dy)) -
                        avg(Dy) * flat(np.mat(GDy) * col(W))) - Beta * (
                            covde(Dy**2) - 2 * avg(Dy) * covde(Dy))
            GD2 += 2 * (flat(np.mat(GDz) * col(W * Dz)) -
                        avg(Dz) * flat(np.mat(GDz) * col(W))) - Beta * (
                            covde(Dz**2) - 2 * avg(Dz) * covde(Dz))
            Eps0_grad[PT] = prefactor * (GD2 / avg(V) -
                                         mBeta * covde(V) * D2 / avg(V)**2) / T
            ## Average area per lipid
            Al_calc[PT] = np.dot(W, A)
            Al_grad[PT] = mBeta * (flat(np.mat(G) * col(W * A)) -
                                   np.dot(W, A) * Gbar)
            ## Deuterium order parameter
            Scd_calc[PT] = np.dot(W, S)
            Scd_grad[PT] = mBeta * (
                flat(np.average(np.mat(G) * (S * W[:, np.newaxis]), axis=1)) -
                np.average(np.average(S * W[:, np.newaxis], axis=0), axis=0) *
                Gbar)
            ## Estimation of errors.
            Rho_std[PT] = np.sqrt(sum(C**2 * np.array(Rho_errs)**2))
            Alpha_std[PT] = np.sqrt(sum(C**2 * np.array(Alpha_errs)**2)) * 1e4
            Kappa_std[PT] = np.sqrt(sum(C**2 * np.array(Kappa_errs)**2)) * 1e6
            Cp_std[PT] = np.sqrt(sum(C**2 * np.array(Cp_errs)**2))
            Eps0_std[PT] = np.sqrt(sum(C**2 * np.array(Eps0_errs)**2))
            Al_std[PT] = np.sqrt(sum(C**2 * np.array(Al_errs)**2))
            Scd_std[PT] = np.sqrt(sum(np.mat(C**2) * np.array(Scd_errs)**2))

        # Get contributions to the objective function
        X_Rho, G_Rho, H_Rho, RhoPrint = self.objective_term(Points,
                                                            'rho',
                                                            Rho_calc,
                                                            Rho_std,
                                                            Rho_grad,
                                                            name="Density")
        X_Alpha, G_Alpha, H_Alpha, AlphaPrint = self.objective_term(
            Points,
            'alpha',
            Alpha_calc,
            Alpha_std,
            Alpha_grad,
            name="Thermal Expansion")
        X_Kappa, G_Kappa, H_Kappa, KappaPrint = self.objective_term(
            Points,
            'kappa',
            Kappa_calc,
            Kappa_std,
            Kappa_grad,
            name="Compressibility")
        X_Cp, G_Cp, H_Cp, CpPrint = self.objective_term(Points,
                                                        'cp',
                                                        Cp_calc,
                                                        Cp_std,
                                                        Cp_grad,
                                                        name="Heat Capacity")
        X_Eps0, G_Eps0, H_Eps0, Eps0Print = self.objective_term(
            Points,
            'eps0',
            Eps0_calc,
            Eps0_std,
            Eps0_grad,
            name="Dielectric Constant")
        X_Al, G_Al, H_Al, AlPrint = self.objective_term(
            Points, 'al', Al_calc, Al_std, Al_grad, name="Avg Area per Lipid")
        X_Scd, G_Scd, H_Scd, ScdPrint = self.objective_term(
            Points,
            'scd',
            Scd_calc,
            Scd_std,
            Scd_grad,
            name="Deuterium Order Parameter")

        Gradient = np.zeros(self.FF.np)
        Hessian = np.zeros((self.FF.np, self.FF.np))

        if X_Rho == 0: self.w_rho = 0.0
        if X_Alpha == 0: self.w_alpha = 0.0
        if X_Kappa == 0: self.w_kappa = 0.0
        if X_Cp == 0: self.w_cp = 0.0
        if X_Eps0 == 0: self.w_eps0 = 0.0
        if X_Al == 0: self.w_al = 0.0
        if X_Scd == 0: self.w_scd = 0.0

        w_tot = self.w_rho + self.w_alpha + self.w_kappa + self.w_cp + self.w_eps0 + self.w_al + self.w_scd
        w_1 = self.w_rho / w_tot
        w_3 = self.w_alpha / w_tot
        w_4 = self.w_kappa / w_tot
        w_5 = self.w_cp / w_tot
        w_6 = self.w_eps0 / w_tot
        w_7 = self.w_al / w_tot
        w_8 = self.w_scd / w_tot

        Objective = w_1 * X_Rho + w_3 * X_Alpha + w_4 * X_Kappa + w_5 * X_Cp + w_6 * X_Eps0 + w_7 * X_Al + w_8 * X_Scd
        if AGrad:
            Gradient = w_1 * G_Rho + w_3 * G_Alpha + w_4 * G_Kappa + w_5 * G_Cp + w_6 * G_Eps0 + w_7 * G_Al + w_8 * G_Scd
        if AHess:
            Hessian = w_1 * H_Rho + w_3 * H_Alpha + w_4 * H_Kappa + w_5 * H_Cp + w_6 * H_Eps0 + w_7 * H_Al + w_8 * H_Scd

        if not in_fd():
            self.Xp = {
                "Rho": X_Rho,
                "Alpha": X_Alpha,
                "Kappa": X_Kappa,
                "Cp": X_Cp,
                "Eps0": X_Eps0,
                "Al": X_Al,
                "Scd": X_Scd
            }
            self.Wp = {
                "Rho": w_1,
                "Alpha": w_3,
                "Kappa": w_4,
                "Cp": w_5,
                "Eps0": w_6,
                "Al": w_7,
                "Scd": w_8
            }
            self.Pp = {
                "Rho": RhoPrint,
                "Alpha": AlphaPrint,
                "Kappa": KappaPrint,
                "Cp": CpPrint,
                "Eps0": Eps0Print,
                "Al": AlPrint,
                "Scd": ScdPrint
            }
            if AGrad:
                self.Gp = {
                    "Rho": G_Rho,
                    "Alpha": G_Alpha,
                    "Kappa": G_Kappa,
                    "Cp": G_Cp,
                    "Eps0": G_Eps0,
                    "Al": G_Al,
                    "Scd": G_Scd
                }
            self.Objective = Objective

        Answer = {'X': Objective, 'G': Gradient, 'H': Hessian}
        return Answer
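
The property estimators inside the loop above are reweighted fluctuation formulas: every ensemble average is a dot product of an observable with one column of the weight matrix W2. The short, self-contained sketch below reproduces two of them (isothermal compressibility and thermal expansion) on synthetic data, assuming uniform weights in place of MBAR weights; only the conversion constants are taken from the code above, and the kb value is an assumption.

import numpy as np

rng = np.random.default_rng(0)
kb = 0.0083144621                      # Boltzmann constant in kJ/mol/K (assumed value of the module-level kb)
T = 298.15
kT = kb * T

W = np.full(1000, 1.0 / 1000)          # stand-in for one column of the MBAR weight matrix W2
V = rng.normal(30.0, 0.1, 1000)        # box volumes in nm^3 (synthetic data)
H = rng.normal(-4000.0, 20.0, 1000)    # enthalpies E + PV in kJ/mol (synthetic data)

def avg(vec):
    return np.dot(W, vec)              # weighted ensemble average, as in avg() above

bar_unit = 0.06022141793 * 1e6         # same conversion constant as in the code above
kappa = bar_unit / kT * (avg(V**2) - avg(V)**2) / avg(V)      # isothermal compressibility in 1/bar
alpha = 1e4 * (avg(H*V) - avg(H)*avg(V)) / avg(V) / (kT*T)    # thermal expansion in 1e-4/K
print(kappa, alpha)
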
Example #3
import os
import sys
import subprocess as sp

import numpy as np
import pymbar  # pymbar 3.x API (MBAR, getFreeEnergyDifferences)
from pymbar import timeseries  # provides subsampleCorrelatedData


def fe_mbar(comp, pose, mode, rest_file, temperature):

    kB = 1.381e-23 * 6.022e23 / (4.184 * 1000.0)  # Boltzmann constant in kcal/mol/K
    beta = 1 / (kB * temperature)  # inverse temperature
    N_max = 20000  # Maximum number of frames per simulation window; increase this if you ran longer simulations

    ### Change to pose directory
    os.chdir('fe')
    os.chdir(pose)
    if comp != 'u':
        os.chdir('rest')
    else:
        os.chdir('pmf')
    if not os.path.exists('data'):
        os.makedirs('data')

    # Define log file
    sys.stdout = open('./data/mbar-' + comp + '-' + mode + '.log', 'w')

    ### Determine Number of windows
    K = 0
    filename = './' + comp + '%02.0f/%s' % (K, rest_file)
    while os.path.isfile(filename):
        K = K + 1
        filename = './' + comp + '%02.0f/%s' % (K, rest_file)

    ## Determine Number of restraints
    infile = open('./' + comp + '00/disang.rest', 'r')
    disang = infile.readlines()
    infile.close()
    R = 0
    if (comp == 't' or comp == 'u'):
        for line in disang:
            cols = line.split()
            if len(cols) != 0 and (cols[-1] == "#Lig_TR"):
                R += 1
    elif (comp == 'l' or comp == 'c'):
        for line in disang:
            cols = line.split()
            if len(cols) != 0 and (cols[-1] == "#Lig_C"
                                   or cols[-1] == "#Lig_D"):
                R += 1
    elif (comp == 'a' or comp == 'r'):
        for line in disang:
            cols = line.split()
            if len(cols) != 0 and (cols[-1] == "#Rec_C"
                                   or cols[-1] == "#Rec_D"):
                R += 1

    print "K= %5.0f  R= %5.0f" % (K, R)

    ### Calculate Statistical Inefficiency (g)
    def calcg(data):
        sum = 0
        randnum = ("%05.0f" % (int(100000 * np.random.random())))
        datafn = '/dev/shm/series.' + randnum + '.dat'
        acffn = '/dev/shm/acf.' + randnum + '.dat'
        cppfn = '/dev/shm/pt-acf.' + randnum + '.in'
        np.savetxt(datafn, data)
        cpptin = open(cppfn, 'w')
        cpptin.write("readdata " + datafn + " name " + randnum +
                     "\nautocorr " + randnum + " out " + acffn + " noheader\n")
        cpptin.close()

        FNULL = open(os.devnull, 'w')
        sp.call(['cpptraj', '-i', cppfn], stdout=FNULL, stderr=sp.STDOUT)

        with open(acffn, 'r') as acf:
            for line in acf:
                col = line.split()
                t = float(col[0]) - 1.0
        T = t

        with open(acffn, 'r') as acf:
            for line in acf:
                col = line.split()
                t = float(col[0]) - 1.0
                v = float(col[1])
                if t == 0:
                    continue
                if v < 0.0:
                    break
                sum += (1 - (t / T)) * (v)

        sp.call(['rm', datafn, acffn, cppfn])

        return 1 + (2 * sum)

    ### Allocate storage for simulation data
    N = np.zeros(
        [K], np.int32
    )  # N_k[k] is the number of snapshots to be used from umbrella simulation k
    Neff = np.zeros([K], np.int32)
    Nind = np.zeros([K], np.int32)
    rty = ['d'] * R  # restraint type (distance or angle)
    rfc = np.zeros([K, R], np.float64)  # restraint force constant
    req = np.zeros([K, R], np.float64)  # restraint target value
    val = np.zeros(
        [N_max, K, R],
        np.float64)  # value of the restrained variable at each frame n
    g = np.zeros([K], np.float64)
    u = np.zeros([N_max], np.float64)

    ### Read the simulation data
    for k in range(K):
        # Read Equilibrium Value and Force Constant
        filename = './' + comp + '%02.0f/disang.rest' % k
        infile = open(filename, 'r')
        disang = infile.readlines()
        infile.close()
        r = 0
        for line in disang:
            cols = line.split()
            if (comp == 't' or comp == 'u'):
                if len(cols) != 0 and (cols[-1] == "#Lig_TR"):
                    natms = len(cols[2].split(',')) - 1
                    req[k, r] = float(cols[6].replace(",", ""))
                    if natms == 2:
                        rty[r] = 'd'
                        rfc[k, r] = float(cols[12].replace(",", ""))
                    elif natms == 3:
                        rty[r] = 'a'
                        rfc[k, r] = float(cols[12].replace(
                            ",", "")) * (np.pi / 180.0) * (
                                np.pi / 180.0)  ### Convert to degrees
                    elif natms == 4:
                        rty[r] = 't'
                        rfc[k, r] = float(cols[12].replace(
                            ",", "")) * (np.pi / 180.0) * (
                                np.pi / 180.0)  ### Convert to degrees
                    else:
                        sys.exit("not sure about restraint type!")
                    r += 1
            elif (comp == 'l' or comp == 'c'):
                if len(cols) != 0 and (cols[-1] == "#Lig_C"
                                       or cols[-1] == "#Lig_D"):
                    natms = len(cols[2].split(',')) - 1
                    req[k, r] = float(cols[6].replace(",", ""))
                    if natms == 2:
                        rty[r] = 'd'
                        rfc[k, r] = float(cols[12].replace(",", ""))
                    elif natms == 3:
                        rty[r] = 'a'
                        rfc[k, r] = float(cols[12].replace(
                            ",", "")) * (np.pi / 180.0) * (
                                np.pi / 180.0)  ### Convert to degrees
                    elif natms == 4:
                        rty[r] = 't'
                        rfc[k, r] = float(cols[12].replace(
                            ",", "")) * (np.pi / 180.0) * (
                                np.pi / 180.0)  ### Convert to degrees
                    else:
                        sys.exit("not sure about restraint type!")
                    r += 1
            elif (comp == 'a' or comp == 'r'):
                if len(cols) != 0 and (cols[-1] == "#Rec_C"
                                       or cols[-1] == "#Rec_D"):
                    natms = len(cols[2].split(',')) - 1
                    req[k, r] = float(cols[6].replace(",", ""))
                    if natms == 2:
                        rty[r] = 'd'
                        rfc[k, r] = float(cols[12].replace(",", ""))
                    elif natms == 3:
                        rty[r] = 'a'
                        rfc[k, r] = float(cols[12].replace(
                            ",", "")) * (np.pi / 180.0) * (
                                np.pi / 180.0)  ### Convert to degrees
                    elif natms == 4:
                        rty[r] = 't'
                        rfc[k, r] = float(cols[12].replace(
                            ",", "")) * (np.pi / 180.0) * (
                                np.pi / 180.0)  ### Convert to degrees
                    else:
                        sys.exit("not sure about restraint type!")
                    r += 1

        # Read in Values for restrained variables for each simulation
        filename = './' + comp + '%02.0f/%s' % (k, rest_file)
        infile = open(filename, 'r')
        restdat = infile.readlines()  # read all frames (optionally skip equilibration, e.g. readlines()[20:])
        infile.close()
        # Parse Data
        n = 0
        for line in restdat:
            if line[0] != '#' and line[0] != '@' and n < N_max:
                cols = line.split()
                for r in range(R):
                    if rty[r] == 't':  # Do phase corrections
                        tmp = float(cols[r + 1])
                        if tmp < req[k, r] - 180.0:
                            val[n, k, r] = tmp + 360
                        elif tmp > req[k, r] + 180.0:
                            val[n, k, r] = tmp - 360
                        else:
                            val[n, k, r] = tmp
                    else:
                        val[n, k, r] = float(cols[r + 1])
                n += 1

        N[k] = n

        # Calculate Reduced Potential
        if comp != 'u':  ### Attach/Release Restraints
            if rfc[k, 0] == 0:
                tmp = np.ones([R], np.float64) * 0.001  # NOTE: check this; it might interfere with the protein attach component
                u[0:N[k]] = np.sum(beta * tmp[0:R] *
                                   ((val[0:N[k], k, 0:R] - req[k, 0:R])**2),
                                   axis=1)
            else:
                u[0:N[k]] = np.sum(beta * rfc[k, 0:R] *
                                   ((val[0:N[k], k, 0:R] - req[k, 0:R])**2),
                                   axis=1)
        else:  ### Umbrella/Translation
            u[0:N[k]] = (beta * rfc[k, 0] *
                         ((val[0:N[k], k, 0] - req[k, 0])**2))

        if mode == 'sub':
            g[k] = calcg(u[0:N[k]])
            subs = timeseries.subsampleCorrelatedData(np.zeros([N[k]]), g=g[k])
            Nind[k] = len(subs)
            Neff[k] = Nind[k]
        else:
            g[k] = 1.00
            Neff[k] = N[k]

        print "Processed Window %5.0f.  N= %12.0f.  g= %10.3f   Neff= %12.0f" % (
            k, N[k], g[k], Neff[k])

    Upot = np.zeros([K, K, np.max(Neff)], np.float64)

    # Calculate Restraint Energy
    for k in range(K):
        if mode == 'sub':  #subsampling
            subs = timeseries.subsampleCorrelatedData(np.zeros([N[k]]), g=g[k])
            for l in range(K):
                if comp != 'u':  # Attach Restraints
                    Upot[k, l, 0:Neff[k]] = np.sum(
                        beta * rfc[l, 0:R] *
                        ((val[subs[0:Neff[k]], k, 0:R] - req[l, 0:R])**2),
                        axis=1)
                else:  # Umbrella/Translation
                    Upot[k, l, 0:Neff[k]] = (beta * rfc[l, 0] * (
                        (val[subs[0:Neff[k]], k, 0] - req[l, 0])**2))
        else:
            Neff[k] = N[k]
            for l in range(K):  # all samples
                if comp != 'u':  # Attach Restraints
                    Upot[k, l, 0:Neff[k]] = np.sum(
                        beta * rfc[l, 0:R] *
                        ((val[0:Neff[k], k, 0:R] - req[l, 0:R])**2),
                        axis=1)
                else:  # Umbrella/Translation
                    Upot[k, l,
                         0:Neff[k]] = (beta * rfc[l, 0] *
                                       ((val[0:Neff[k], k, 0] - req[l, 0])**2))

    val = []

    print "Running MBAR... "
    mbar = pymbar.MBAR(Upot,
                       Neff,
                       verbose=True,
                       method='adaptive',
                       initialize='BAR')

    print "Calculate Free Energy Differences Between States"
    [Deltaf, dDeltaf] = mbar.getFreeEnergyDifferences()

    min = np.argmin(Deltaf[0])

    # Write to file
    print "Free Energy Differences (in units of kcal/mol)"
    print "%9s %8s %8s %12s %12s" % ('bin', 'f', 'df', 'deq', 'dfc')
    datfile = open('./data/mbar-' + comp + '-' + mode + '.dat', 'w')
    for k in range(K):
        if comp != 'u':  # Attach/release
            print "%10.5f %10.5f %10.5f %12.7f %12.7f" % (
                rfc[k, 0] / rfc[-1, 0], Deltaf[0, k] / beta,
                dDeltaf[0, k] / beta, req[k, 0], rfc[k, 0])
            datfile.write("%10.5f %10.5f %10.5f %12.7f %12.7f\n" %
                          (rfc[k, 0] / rfc[-1, 0], Deltaf[0, k] / beta,
                           dDeltaf[0, k] / beta, req[k, 0], rfc[k, 0]))
        else:  # Umbrella/Translation
            print "%10.5f %10.5f %10.5f %12.7f %12.7f" % (
                req[k, 0], Deltaf[0, k] / beta, dDeltaf[0, k] / beta,
                req[k, 0], rfc[k, 0])
            datfile.write("%10.5f %10.5f %10.5f %12.7f %12.7f\n" %
                          (req[k, 0], Deltaf[0, k] / beta,
                           dDeltaf[0, k] / beta, req[k, 0], rfc[k, 0]))
    datfile.close()
    print "\n\n"

    os.chdir('../../../')
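
A minimal call sketch for fe_mbar is shown below. It assumes the fe/<pose>/rest (or fe/<pose>/pmf) window directories with disang.rest and the per-window output files already exist; the pose label, file name and temperature are illustrative placeholders. The restraint energies are evaluated with the harmonic form rfc*(val - req)**2, as in the code above.

# Attach/release of the ligand conformational restraints ('l') for a hypothetical pose,
# MBAR with autocorrelation-based subsampling ('sub'); 'restraints.dat' is a hypothetical rest_file name.
fe_mbar('l', 'pose0', 'sub', 'restraints.dat', 298.15)

# Umbrella/translation (pulling) component 'u', using every frame (any mode other than 'sub').
fe_mbar('u', 'pose0', 'all', 'restraints.dat', 298.15)
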
Example #4
    def get(self, mvals, AGrad=True, AHess=True):
        
        """
        Fitting of liquid bulk properties.  This is the current major
        direction of development for ForceBalance.  Basically, fitting
        the QM energies / forces alone does not always give us the
        best simulation behavior.  In many cases it makes more sense
        to try and reproduce some experimentally known data as well.

        In order to reproduce experimentally known data, we need to
        run a simulation and compare the simulation result to
        experiment.  The main challenge here is that the simulations
        are computationally intensive (i.e. they require energy and
        force evaluations), and furthermore the results are noisy.  We
        need to run the simulations automatically and remotely
        (i.e. on clusters), and we need a good way to calculate the
        derivatives of the simulation results with respect to the
        parameter values.

        This function contains some experimentally known values of the
        density and enthalpy of vaporization (Hvap) of liquid water.
        It launches the density and Hvap calculations on the cluster,
        and gathers the results / derivatives.  The actual calculation
        of results / derivatives is done in a separate file.

        After the results come back, they are gathered together to form
        an objective function.

        @param[in] mvals Mathematical parameter values
        @param[in] AGrad Switch to turn on analytic gradient
        @param[in] AHess Switch to turn on analytic Hessian
        @return Answer Contribution to the objective function
        
        """
        
        unpack = forcebalance.nifty.lp_load('forcebalance.p')
        mvals1 = unpack[1]
        if (np.max(np.abs(mvals1 - mvals)) > 1e-3):
            warn_press_key("mvals from forcebalance.p does not match up with internal values! (Are you reading data from a previous run?)\nmvals(call)=%s mvals(disk)=%s" % (mvals, mvals1))

        mbar_verbose = False

        Answer = {}

        Results = {}
        Points = []  # These are the phase points for which data exists.
        BPoints = [] # These are the phase points for which we are doing MBAR for the condensed phase.
        mBPoints = [] # These are the phase points for which we are doing MBAR for the monomers.
        mPoints = [] # These are the phase points to use for enthalpy of vaporization; if we're scanning pressure then set hvap_wt for higher pressures to zero.
        tt = 0
        for label, PT in zip(self.Labels, self.PhasePoints):
            if os.path.exists('./%s/npt_result.p' % label):
                logger.info('Reading information from ./%s/npt_result.p\n' % label)
                Points.append(PT)
                Results[tt] = lp_load('./%s/npt_result.p' % label)
                if 'hvap' in self.RefData and PT[0] not in [i[0] for i in mPoints]:
                    mPoints.append(PT)
                if 'mbar' in self.RefData and PT in self.RefData['mbar'] and self.RefData['mbar'][PT]:
                    BPoints.append(PT)
                    if 'hvap' in self.RefData and PT[0] not in [i[0] for i in mBPoints]:
                        mBPoints.append(PT)
                tt += 1
            else:
                logger.warning('In %s :\n' % os.getcwd())
                logger.warning('The file ./%s/npt_result.p does not exist so we cannot read it\n' % label)
                pass
        if len(Points) == 0:
            logger.error('The liquid simulations have terminated with \x1b[1;91mno readable data\x1b[0m - this is a problem!\n')
            raise RuntimeError

        # Assign variable names to all the stuff in npt_result.p
        Rhos, Vols, Potentials, Energies, Dips, Grads, GDips, mPotentials, mEnergies, mGrads, \
            Rho_errs, Hvap_errs, Alpha_errs, Kappa_errs, Cp_errs, Eps0_errs, NMols = ([Results[t][i] for t in range(len(Points))] for i in range(17))
        # Determine the number of molecules
        if len(set(NMols)) != 1:
            logger.error(str(NMols))
            logger.error('The above list should only contain one number - the number of molecules\n')
            raise RuntimeError
        else:
            NMol = list(set(NMols))[0]
    
        if not self.adapt_errors:
            self.AllResults = defaultdict(lambda:defaultdict(list))

        astrm = astr(mvals)
        if len(Points) != len(self.Labels):
            logger.info("Data sets is not full, will not use for concatenation.")
            astrm += "_"*(Counter()+1)
        self.AllResults[astrm]['Pts'].append(Points)
        self.AllResults[astrm]['mPts'].append(Points)
        self.AllResults[astrm]['E'].append(np.array(Energies))
        self.AllResults[astrm]['V'].append(np.array(Vols))
        self.AllResults[astrm]['R'].append(np.array(Rhos))
        self.AllResults[astrm]['Dx'].append(np.array([d[:,0] for d in Dips]))
        self.AllResults[astrm]['Dy'].append(np.array([d[:,1] for d in Dips]))
        self.AllResults[astrm]['Dz'].append(np.array([d[:,2] for d in Dips]))
        self.AllResults[astrm]['G'].append(np.array(Grads))
        self.AllResults[astrm]['GDx'].append(np.array([gd[0] for gd in GDips]))
        self.AllResults[astrm]['GDy'].append(np.array([gd[1] for gd in GDips]))
        self.AllResults[astrm]['GDz'].append(np.array([gd[2] for gd in GDips]))
        self.AllResults[astrm]['L'].append(len(Energies[0]))
        self.AllResults[astrm]['Steps'].append(self.liquid_md_steps)

        if len(mPoints) > 0:
            self.AllResults[astrm]['mE'].append(np.array([i for pt, i in zip(Points,mEnergies) if pt in mPoints]))
            self.AllResults[astrm]['mG'].append(np.array([i for pt, i in zip(Points,mGrads) if pt in mPoints]))

        # Number of data sets belonging to this value of the parameters.
        Nrpt = len(self.AllResults[astrm]['R'])
        sumsteps = sum(self.AllResults[astrm]['Steps'])
        if self.liquid_md_steps != sumsteps:
            printcool("This objective function evaluation combines %i datasets\n" \
                          "Increasing simulation length: %i -> %i steps" % \
                          (Nrpt, self.liquid_md_steps, sumsteps), color=6)
            if self.liquid_md_steps * 2 != sumsteps:
                logger.error("Spoo!\n")
                raise RuntimeError
            self.liquid_eq_steps *= 2
            self.liquid_md_steps *= 2
            self.gas_eq_steps *= 2
            self.gas_md_steps *= 2

        # Concatenate along the data-set axis (more than 1 element  if we've returned to these parameters.)
        E, V, R, Dx, Dy, Dz = \
            (np.hstack(tuple(self.AllResults[astrm][i])) for i in \
                 ['E', 'V', 'R', 'Dx', 'Dy', 'Dz'])

        G, GDx, GDy, GDz = \
            (np.hstack((np.concatenate(tuple(self.AllResults[astrm][i]), axis=2))) for i in ['G', 'GDx', 'GDy', 'GDz'])

        if len(mPoints) > 0:
            mE = np.hstack(tuple(self.AllResults[astrm]['mE']))
            mG = np.hstack((np.concatenate(tuple(self.AllResults[astrm]['mG']), axis=2)))
        Rho_calc = OrderedDict([])
        Rho_grad = OrderedDict([])
        Rho_std  = OrderedDict([])
        Hvap_calc = OrderedDict([])
        Hvap_grad = OrderedDict([])
        Hvap_std  = OrderedDict([])
        Alpha_calc = OrderedDict([])
        Alpha_grad = OrderedDict([])
        Alpha_std  = OrderedDict([])
        Kappa_calc = OrderedDict([])
        Kappa_grad = OrderedDict([])
        Kappa_std  = OrderedDict([])
        Cp_calc = OrderedDict([])
        Cp_grad = OrderedDict([])
        Cp_std  = OrderedDict([])
        Eps0_calc = OrderedDict([])
        Eps0_grad = OrderedDict([])
        Eps0_std  = OrderedDict([])

        # The unit that converts atmospheres * nm**3 into kJ/mol :)
        pvkj=0.061019351687175

        # Run MBAR using the total energies. Required for estimates that use the kinetic energy.
        BSims = len(BPoints)
        Shots = len(E[0])
        N_k = np.ones(BSims)*Shots
        # Use the value of the energy for snapshot t from simulation k at potential m
        U_kln = np.zeros([BSims,BSims,Shots])
        for m, PT in enumerate(BPoints):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            beta = 1. / (kb * T)
            for k in range(BSims):
                # The correct Boltzmann factors include PV.
                # Note that because the Boltzmann factors are computed from the conditions at simulation "m",
                # the pV terms must be rescaled to the pressure at simulation "m".
                kk = Points.index(BPoints[k])
                U_kln[k, m, :]   = E[kk] + P*V[kk]*pvkj
                U_kln[k, m, :]  *= beta
        W1 = None
        if len(BPoints) > 1:
            logger.info("Running MBAR analysis on %i states...\n" % len(BPoints))
            mbar = pymbar.MBAR(U_kln, N_k, verbose=mbar_verbose, relative_tolerance=5.0e-8)
            W1 = mbar.getWeights()
            logger.info("Done\n")
        elif len(BPoints) == 1:
            W1 = np.ones((len(BPoints)*Shots, len(BPoints)))
            W1 /= len(BPoints)*Shots
        
        def fill_weights(weights, phase_points, mbar_points, snapshots):
            """ Fill in the weight matrix with MBAR weights where MBAR was run, 
            and equal weights otherwise. """
            new_weights = np.zeros([len(phase_points)*snapshots,len(phase_points)])
            for m, PT in enumerate(phase_points):
                if PT in mbar_points:
                    mm = mbar_points.index(PT)
                    for kk, PT1 in enumerate(mbar_points):
                        k = phase_points.index(PT1)
                        logger.debug("Will fill W2[%i:%i,%i] with W1[%i:%i,%i]\n" % (k*snapshots,k*snapshots+snapshots,m,kk*snapshots,kk*snapshots+snapshots,mm))
                        new_weights[k*snapshots:(k+1)*snapshots,m] = weights[kk*snapshots:(kk+1)*snapshots,mm]
                else:
                    logger.debug("Will fill W2[%i:%i,%i] with equal weights\n" % (m*snapshots,(m+1)*snapshots,m))
                    new_weights[m*snapshots:(m+1)*snapshots,m] = 1.0/snapshots
            return new_weights
        
        W2 = fill_weights(W1, Points, BPoints, Shots)

        if len(mPoints) > 0:
            # Run MBAR on the monomers.  This is barely necessary.
            mW1 = None
            mShots = len(mE[0])
            if len(mBPoints) > 1:
                mBSims = len(mBPoints)
                mN_k = np.ones(mBSims)*mShots
                mU_kln = np.zeros([mBSims,mBSims,mShots])
                for m, PT in enumerate(mBPoints):
                    T = PT[0]
                    beta = 1. / (kb * T)
                    for k in range(mBSims):
                        kk = Points.index(mBPoints[k])
                        mU_kln[k, m, :]  = mE[kk]
                        mU_kln[k, m, :] *= beta
                if np.abs(np.std(mE)) > 1e-6 and mBSims > 1:
                    mmbar = pymbar.MBAR(mU_kln, mN_k, verbose=False, relative_tolerance=5.0e-8, method='self-consistent-iteration')
                    mW1 = mmbar.getWeights()
            elif len(mBPoints) == 1:
                mW1 = np.ones((len(mBPoints)*mShots, len(mBPoints)))
                mW1 /= len(mBPoints)*mShots
            mW2 = fill_weights(mW1, mPoints, mBPoints, mShots)
         
        if self.do_self_pol:
            EPol = self.polarization_correction(mvals)
            GEPol = np.array([(f12d3p(fdwrap(self.polarization_correction, mvals, p), h = self.h, f0 = EPol)[0] if p in self.pgrad else 0.0) for p in range(self.FF.np)])
            bar = printcool("Self-polarization correction to \nenthalpy of vaporization is % .3f kJ/mol%s" % (EPol, ", Derivative:" if AGrad else ""))
            if AGrad:
                self.FF.print_map(vals=GEPol)
                logger.info(bar)

        # Arrays must be flattened now for calculation of properties.
        E = E.flatten()
        V = V.flatten()
        R = R.flatten()
        Dx = Dx.flatten()
        Dy = Dy.flatten()
        Dz = Dz.flatten()
        if len(mPoints) > 0: mE = mE.flatten()
            
        for i, PT in enumerate(Points):
            T = PT[0]
            P = PT[1] / 1.01325 if PT[2] == 'bar' else PT[1]
            PV = P*V*pvkj
            H = E + PV
            # The weights that we want are the last ones.
            W = flat(W2[:,i])
            C = weight_info(W, PT, np.ones(len(Points))*Shots, verbose=mbar_verbose)
            Gbar = flat(np.matrix(G)*col(W))
            mBeta = -1/kb/T
            Beta  = 1/kb/T
            kT    = kb*T
            # Define some things to make the analytic derivatives easier.
            def avg(vec):
                return np.dot(W,vec)
            def covde(vec):
                return flat(np.matrix(G)*col(W*vec)) - avg(vec)*Gbar
            def deprod(vec):
                return flat(np.matrix(G)*col(W*vec))
            ## Density.
            Rho_calc[PT]   = np.dot(W,R)
            Rho_grad[PT]   = mBeta*(flat(np.matrix(G)*col(W*R)) - np.dot(W,R)*Gbar)
            ## Enthalpy of vaporization.
            if PT in mPoints:
                ii = mPoints.index(PT)
                mW = flat(mW2[:,ii])
                mGbar = flat(np.matrix(mG)*col(mW))
                Hvap_calc[PT]  = np.dot(mW,mE) - np.dot(W,E)/NMol + kb*T - np.dot(W, PV)/NMol
                Hvap_grad[PT]  = mGbar + mBeta*(flat(np.matrix(mG)*col(mW*mE)) - np.dot(mW,mE)*mGbar)
                Hvap_grad[PT] -= (Gbar + mBeta*(flat(np.matrix(G)*col(W*E)) - np.dot(W,E)*Gbar)) / NMol
                Hvap_grad[PT] -= (mBeta*(flat(np.matrix(G)*col(W*PV)) - np.dot(W,PV)*Gbar)) / NMol
                if self.do_self_pol:
                    Hvap_calc[PT] -= EPol
                    Hvap_grad[PT] -= GEPol
                if hasattr(self,'use_cni') and self.use_cni:
                    if not ('cni' in self.RefData and self.RefData['cni'][PT]):
                        logger.error('Asked for a nonideality correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.\n')
                        raise RuntimeError
                    logger.debug("Adding % .3f to enthalpy of vaporization at " % self.RefData['cni'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cni'][PT]
                if hasattr(self,'use_cvib_intra') and self.use_cvib_intra:
                    if not ('cvib_intra' in self.RefData and self.RefData['cvib_intra'][PT]):
                        logger.error('Asked for a quantum intramolecular vibrational correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.\n')
                        raise RuntimeError
                    logger.debug("Adding % .3f to enthalpy of vaporization at " % self.RefData['cvib_intra'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cvib_intra'][PT]
                if hasattr(self,'use_cvib_inter') and self.use_cvib_inter:
                    if not ('cvib_inter' in self.RefData and self.RefData['cvib_inter'][PT]):
                        logger.error('Asked for a quantum intermolecular vibrational correction but not provided in reference data (data.csv).  Either disable the option in data.csv or add data.\n')
                        raise RuntimeError
                    logger.debug("Adding % .3f to enthalpy of vaporization at " % self.RefData['cvib_inter'][PT] + str(PT) + '\n')
                    Hvap_calc[PT] += self.RefData['cvib_inter'][PT]
            else:
                Hvap_calc[PT]  = 0.0
                Hvap_grad[PT]  = np.zeros(self.FF.np)
            ## Thermal expansion coefficient.
            Alpha_calc[PT] = 1e4 * (avg(H*V)-avg(H)*avg(V))/avg(V)/(kT*T)
            GAlpha1 = -1 * Beta * deprod(H*V) * avg(V) / avg(V)**2
            GAlpha2 = +1 * Beta * avg(H*V) * deprod(V) / avg(V)**2
            GAlpha3 = deprod(V)/avg(V) - Gbar
            GAlpha4 = Beta * covde(H)
            Alpha_grad[PT] = 1e4 * (GAlpha1 + GAlpha2 + GAlpha3 + GAlpha4)/(kT*T)
            ## Isothermal compressibility.
            bar_unit = 0.06022141793 * 1e6
            Kappa_calc[PT] = bar_unit / kT * (avg(V**2)-avg(V)**2)/avg(V)
            GKappa1 = +1 * Beta**2 * avg(V**2) * deprod(V) / avg(V)**2
            GKappa2 = -1 * Beta**2 * avg(V) * deprod(V**2) / avg(V)**2
            GKappa3 = +1 * Beta**2 * covde(V)
            Kappa_grad[PT] = bar_unit*(GKappa1 + GKappa2 + GKappa3)
            ## Isobaric heat capacity.
            Cp_calc[PT] = 1000/(4.184*NMol*kT*T) * (avg(H**2) - avg(H)**2)
            if hasattr(self,'use_cvib_intra') and self.use_cvib_intra:
                logger.debug("Adding " + str(self.RefData['devib_intra'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_intra'][PT]
            if hasattr(self,'use_cvib_inter') and self.use_cvib_inter:
                logger.debug("Adding " + str(self.RefData['devib_inter'][PT]) + " to the heat capacity\n")
                Cp_calc[PT] += self.RefData['devib_inter'][PT]
            GCp1 = 2*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            GCp2 = mBeta*covde(H**2) * 1000 / 4.184 / (NMol*kT*T)
            GCp3 = 2*Beta*avg(H)*covde(H) * 1000 / 4.184 / (NMol*kT*T)
            Cp_grad[PT] = GCp1 + GCp2 + GCp3
            ## Static dielectric constant.
            prefactor = 30.348705333964077
            D2 = avg(Dx**2)+avg(Dy**2)+avg(Dz**2)-avg(Dx)**2-avg(Dy)**2-avg(Dz)**2
            Eps0_calc[PT] = 1.0 + prefactor*(D2/avg(V))/T
            GD2  = 2*(flat(np.matrix(GDx)*col(W*Dx)) - avg(Dx)*flat(np.matrix(GDx)*col(W))) - Beta*(covde(Dx**2) - 2*avg(Dx)*covde(Dx))
            GD2 += 2*(flat(np.matrix(GDy)*col(W*Dy)) - avg(Dy)*flat(np.matrix(GDy)*col(W))) - Beta*(covde(Dy**2) - 2*avg(Dy)*covde(Dy))
            GD2 += 2*(flat(np.matrix(GDz)*col(W*Dz)) - avg(Dz)*flat(np.matrix(GDz)*col(W))) - Beta*(covde(Dz**2) - 2*avg(Dz)*covde(Dz))
            Eps0_grad[PT] = prefactor*(GD2/avg(V) - mBeta*covde(V)*D2/avg(V)**2)/T
            ## Estimation of errors.
            Rho_std[PT]    = np.sqrt(sum(C**2 * np.array(Rho_errs)**2))
            if PT in mPoints:
                Hvap_std[PT]   = np.sqrt(sum(C**2 * np.array(Hvap_errs)**2))
            else:
                Hvap_std[PT]   = 0.0
            Alpha_std[PT]   = np.sqrt(sum(C**2 * np.array(Alpha_errs)**2)) * 1e4
            Kappa_std[PT]   = np.sqrt(sum(C**2 * np.array(Kappa_errs)**2)) * 1e6
            Cp_std[PT]   = np.sqrt(sum(C**2 * np.array(Cp_errs)**2))
            Eps0_std[PT]   = np.sqrt(sum(C**2 * np.array(Eps0_errs)**2))

        # Get contributions to the objective function
        X_Rho, G_Rho, H_Rho, RhoPrint = self.objective_term(Points, 'rho', Rho_calc, Rho_std, Rho_grad, name="Density")
        X_Hvap, G_Hvap, H_Hvap, HvapPrint = self.objective_term(Points, 'hvap', Hvap_calc, Hvap_std, Hvap_grad, name="H_vap", SubAverage=self.hvap_subaverage)
        X_Alpha, G_Alpha, H_Alpha, AlphaPrint = self.objective_term(Points, 'alpha', Alpha_calc, Alpha_std, Alpha_grad, name="Thermal Expansion")
        X_Kappa, G_Kappa, H_Kappa, KappaPrint = self.objective_term(Points, 'kappa', Kappa_calc, Kappa_std, Kappa_grad, name="Compressibility")
        X_Cp, G_Cp, H_Cp, CpPrint = self.objective_term(Points, 'cp', Cp_calc, Cp_std, Cp_grad, name="Heat Capacity")
        X_Eps0, G_Eps0, H_Eps0, Eps0Print = self.objective_term(Points, 'eps0', Eps0_calc, Eps0_std, Eps0_grad, name="Dielectric Constant")

        Gradient = np.zeros(self.FF.np)
        Hessian = np.zeros((self.FF.np,self.FF.np))

        if X_Rho == 0: self.w_rho = 0.0
        if X_Hvap == 0: self.w_hvap = 0.0
        if X_Alpha == 0: self.w_alpha = 0.0
        if X_Kappa == 0: self.w_kappa = 0.0
        if X_Cp == 0: self.w_cp = 0.0
        if X_Eps0 == 0: self.w_eps0 = 0.0

        if self.w_normalize:
            w_tot = self.w_rho + self.w_hvap + self.w_alpha + self.w_kappa + self.w_cp + self.w_eps0
        else:
            w_tot = 1.0
        w_1 = self.w_rho / w_tot
        w_2 = self.w_hvap / w_tot
        w_3 = self.w_alpha / w_tot
        w_4 = self.w_kappa / w_tot
        w_5 = self.w_cp / w_tot
        w_6 = self.w_eps0 / w_tot

        Objective    = w_1 * X_Rho + w_2 * X_Hvap + w_3 * X_Alpha + w_4 * X_Kappa + w_5 * X_Cp + w_6 * X_Eps0
        if AGrad:
            Gradient = w_1 * G_Rho + w_2 * G_Hvap + w_3 * G_Alpha + w_4 * G_Kappa + w_5 * G_Cp + w_6 * G_Eps0
        if AHess:
            Hessian  = w_1 * H_Rho + w_2 * H_Hvap + w_3 * H_Alpha + w_4 * H_Kappa + w_5 * H_Cp + w_6 * H_Eps0

        if not in_fd():
            self.Xp = {"Rho" : X_Rho, "Hvap" : X_Hvap, "Alpha" : X_Alpha, 
                           "Kappa" : X_Kappa, "Cp" : X_Cp, "Eps0" : X_Eps0}
            self.Wp = {"Rho" : w_1, "Hvap" : w_2, "Alpha" : w_3, 
                           "Kappa" : w_4, "Cp" : w_5, "Eps0" : w_6}
            self.Pp = {"Rho" : RhoPrint, "Hvap" : HvapPrint, "Alpha" : AlphaPrint, 
                           "Kappa" : KappaPrint, "Cp" : CpPrint, "Eps0" : Eps0Print}
            if AGrad:
                self.Gp = {"Rho" : G_Rho, "Hvap" : G_Hvap, "Alpha" : G_Alpha, 
                               "Kappa" : G_Kappa, "Cp" : G_Cp, "Eps0" : G_Eps0}
            self.Objective = Objective

        Answer = {'X':Objective, 'G':Gradient, 'H':Hessian}
        return Answer
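
The enthalpy-of-vaporization estimator used above is Hvap ≈ <E_gas> - <E_liq>/NMol + kT - <PV>/NMol, evaluated with MBAR-reweighted averages. The standalone sketch below evaluates the same expression on synthetic data, with uniform weights standing in for columns of W2 and mW2; the numbers and the kb value are assumptions for illustration only.

import numpy as np

rng = np.random.default_rng(0)
kb = 0.0083144621                               # Boltzmann constant in kJ/mol/K (assumed value of kb)
T, NMol = 298.15, 216                           # temperature and number of molecules (illustrative)

E_liq = rng.normal(-10300.0, 30.0, 500)         # liquid-box potential energies, kJ/mol (synthetic)
PV    = rng.normal(2.0, 0.05, 500)              # P*V term per box, kJ/mol (synthetic)
E_gas = rng.normal(-3.5, 1.0, 500)              # single-molecule (gas-phase) energies, kJ/mol (synthetic)

W  = np.full(E_liq.size, 1.0 / E_liq.size)      # stand-in for one column of W2
mW = np.full(E_gas.size, 1.0 / E_gas.size)      # stand-in for one column of mW2

# Same expression as Hvap_calc[PT] in the loop above
Hvap = np.dot(mW, E_gas) - np.dot(W, E_liq) / NMol + kb * T - np.dot(W, PV) / NMol
print("Hvap estimate (kJ/mol):", Hvap)
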