Example #1
import pickle

import lb_loader
import simtk.openmm as mm
import simtk.openmm.app as app
from simtk import unit as u


def run(system_filename, state_filename, integrator_filename, sysname, Neff_cutoff, csv_filename, dcd_filename):

    Neff_cutoff = float(Neff_cutoff)

    system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)

    # Reload the serialized state, test system, and integrator from disk
    state = mm.XmlSerializer.deserialize(open(state_filename).read())
    testsystem = pickle.load(open(system_filename, 'rb'))
    integrator = pickle.load(open(integrator_filename, 'rb'))

    itype = type(integrator).__name__
    print(itype)

    # Rebuild the simulation from the restored state and run it for one minute of wall-clock time
    simulation = lb_loader.build(testsystem, integrator, temperature, state=state)
    simulation.runForClockTime(1.0 * u.minutes)

    # Langevin integrators report every 100 steps; HMC-type integrators report every step.
    # Kinetic energy is only recorded for MJHMC integrators.
    output_frequency = 100 if "Langevin" in itype else 1
    kineticEnergy = "MJHMC" in itype
    simulation.reporters.append(app.StateDataReporter(csv_filename, output_frequency, step=True,
                                                      time=True, potentialEnergy=True, kineticEnergy=kineticEnergy,
                                                      temperature=True, density=True, elapsedTime=True))
    simulation.reporters.append(app.DCDReporter(dcd_filename, output_frequency))
    lb_loader.converge(simulation, csv_filename, Neff_cutoff)
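
A hypothetical invocation of this function might look as follows; the file names are placeholders (only the system name "switchedaccuratewater" and the Neff cutoff of 1E4 appear elsewhere in these examples):

run("testsystem.pickle", "state.xml", "integrator.pickle",
    "switchedaccuratewater", 1E4, "energies.csv", "trajectory.dcd")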
Example #2
# This fragment assumes `context`, `integrator`, `E0`, and the pandas (pd),
# pymbar, simtk.unit (u), and lb_loader imports are defined in the enclosing script.
data = []
for i in range(100000):
    integrator.step(1)
    energy = context.getState(
        getEnergy=True).getPotentialEnergy() / u.kilojoules_per_mole
    data.append(dict(accept=integrator.accept, energy=energy))

data = pd.DataFrame(data)
energies = data.energy.values

# Mean potential energy and its deviation from the reference value E0
energies.mean()
energies.mean() - E0

# Effective sample size Neff = N / g, where g is the statistical inefficiency;
# the standard error of the mean is then sigma / sqrt(Neff)
g = pymbar.timeseries.statisticalInefficiency(energies)
Neff = len(energies) / g
stderr = energies.std() / (Neff**0.5)

data, g, Neff, mu, sigma, stderr = lb_loader.converge(context,
                                                      n_steps=1,
                                                      Neff_cutoff=1E4)

data = [dict(energy=E0)]
data = pd.DataFrame(data)
while True:
    integrator.step(1)
    state = context.getState(getEnergy=True)
    energy = state.getPotentialEnergy() / u.kilojoules_per_mole
    current_data = dict(energy=energy)
    data.loc[len(data)] = current_data
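
As extracted, the loop above never terminates. A minimal sketch of a check that could sit at the end of the loop body, reusing the pymbar-based effective-sample-size estimate shown earlier (the cutoff of 1E4 matches the Neff_cutoff used above), might be:

    # Hypothetical termination criterion for the loop above
    if len(data) % 1000 == 0:
        energies = data.energy.values
        g = pymbar.timeseries.statisticalInefficiency(energies)
        if len(energies) / g >= 1E4:
            break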
Example #3
# Minimize, run 40,000 HMC steps as a warm-up, and record the acceptance rate
integrator = hmc_integrators.HMCIntegrator(temperature,
                                           steps_per_hmc=25,
                                           timestep=timestep)
context = lb_loader.build(system, integrator, positions, temperature)
mm.LocalEnergyMinimizer.minimize(context)
integrator.step(40000)
positions = context.getState(getPositions=True).getPositions()
print(integrator.acceptance_rate)

collision_rate = 1.0 / u.picoseconds
n_steps = 25
Neff_cutoff = 1E5

itype = "HMCIntegrator"

integrator = hmc_integrators.HMCIntegrator(temperature,
                                           steps_per_hmc=25,
                                           timestep=timestep)
# `precision` (e.g. "mixed", as set later in these examples) is assumed to be defined here
context = lb_loader.build(system,
                          integrator,
                          positions,
                          temperature,
                          precision=precision)
filename = "./data/%s_%s_%s_%.3f_%d.csv" % (precision, sysname, itype,
                                            timestep / u.femtoseconds,
                                            collision_rate * u.picoseconds)
print(filename)
integrator.step(40000)
data, start, g, Neff, mu, sigma, stderr = lb_loader.converge(
    context, n_steps=n_steps, Neff_cutoff=Neff_cutoff, filename=filename)
integrator = mm.VerletIntegrator(timestep / 4.)
context = lb_loader.build(system, integrator, positions, temperature)
integrator.step(50000)
positions = context.getState(getPositions=True).getPositions()

collision_rate = 1.0 / u.picoseconds
n_steps = 25
Neff_cutoff = 2000.

grid = []

for itype in ["VerletIntegrator"]:
    for timestep_factor in [1.0, 2.0, 4.0]:
        d = dict(itype=itype, timestep=timestep / timestep_factor)
        grid.append(d)

for settings in grid:
    itype = settings.pop("itype")
    timestep = settings["timestep"]
    integrator = mm.VerletIntegrator(timestep)
    context = lb_loader.build(system, integrator, positions, temperature)
    filename = "./data/%s_%s_%.3f_%d.csv" % (sysname, itype,
                                             timestep / u.femtoseconds,
                                             collision_rate * u.picoseconds)
    print(filename)
    data, start, g, Neff = lb_loader.converge(context,
                                              n_steps=n_steps,
                                              Neff_cutoff=Neff_cutoff)
    data.to_csv(filename)
system.addForce(mm.AndersenThermostat(temperature, 1.0 / u.picoseconds))

integrator = mm.VerletIntegrator(timestep / 4.)
context = lb_loader.build(system, integrator, positions, temperature)
integrator.step(50000)
positions = context.getState(getPositions=True).getPositions()

collision_rate = 1.0 / u.picoseconds
n_steps = 25
Neff_cutoff = 2000.


grid = []

for itype in ["VerletIntegrator"]:
    for timestep_factor in [1.0, 2.0, 4.0]:
        d = dict(itype=itype, timestep=timestep / timestep_factor)
        grid.append(d)


for settings in grid:
    itype = settings.pop("itype")
    timestep = settings["timestep"]
    integrator = mm.VerletIntegrator(timestep)
    context = lb_loader.build(system, integrator, positions, temperature)
    filename = "./data/%s_%s_%.3f_%d.csv" % (sysname, itype, timestep / u.femtoseconds, collision_rate * u.picoseconds)
    print(filename)
    data, start, g, Neff = lb_loader.converge(context, n_steps=n_steps, Neff_cutoff=Neff_cutoff)
    data.to_csv(filename)
    
import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems

precision = "mixed"

sysname = "switchedaccuratewater"

# Note: this version of lb_loader.load() returns seven values; `equil_steps`, used
# below, is assumed to be defined elsewhere (in Example #1 it is one of the values
# returned by lb_loader.load).
system, positions, groups, temperature, timestep, langevin_timestep, testsystem = lb_loader.load(sysname)

positions, boxes = lb_loader.equilibrate(system, temperature, timestep, positions, equil_steps, minimize=True)

collision_rate = 1.0 / u.picoseconds
n_steps = 25
Neff_cutoff = 1E5

itype = "LangevinIntegrator"

langevin_timestep = 0.4 * u.femtoseconds

integrator = mm.LangevinIntegrator(temperature, collision_rate, langevin_timestep)
context = lb_loader.build(system, integrator, positions, temperature, precision=precision)
filename = "./data/%s_%s_%s_%.3f_%d.csv" % (precision, sysname, itype, langevin_timestep / u.femtoseconds, collision_rate * u.picoseconds)
print(filename)
integrator.step(450000)
data, start, g, Neff, mu, sigma, stderr = lb_loader.converge(context, n_steps=n_steps, Neff_cutoff=Neff_cutoff, filename=filename)
Example #7
# Inspect the HMC acceptance rate, grab the current positions, and take 25 steps
# via the integrator's vstep() helper
integrator.acceptance_rate
positions = context.getState(getPositions=True).getPositions()
output = integrator.vstep(25)

data = []
for i in range(100000):
    integrator.step(1)
    energy = context.getState(getEnergy=True).getPotentialEnergy() / u.kilojoules_per_mole
    data.append(dict(accept=integrator.accept, energy=energy))

data = pd.DataFrame(data)
energies = data.energy.values

energies.mean()
energies.mean() - E0

g = pymbar.timeseries.statisticalInefficiency(energies)
Neff = len(energies) / g
stderr = energies.std() / (Neff ** 0.5)

data, g, Neff, mu, sigma, stderr = lb_loader.converge(context, n_steps=1, Neff_cutoff=1E4)

data = [dict(energy=E0)]
data = pd.DataFrame(data)
# As in Example #2, this loop has no termination condition as extracted
while True:
    integrator.step(1)
    state = context.getState(getEnergy=True)
    energy = state.getPotentialEnergy() / u.kilojoules_per_mole
    current_data = dict(energy=energy)
    data.loc[len(data)] = current_data