def generateTrajectory(self, x0, nframes):
    """
    Generate a velocity Verlet trajectory of up to nframes segments, storing a
    Snapshot after each segment and stopping early if a core set is reached.

    ARGUMENTS

    x0 (coordinate set) - initial coordinates; velocities will be assigned from
        the Maxwell-Boltzmann distribution
    nframes (int) - maximum number of trajectory segments to generate

    RETURNS

    trajectory (Trajectory of Snapshot) - generated trajectory of initial
        conditions, including the initial coordinate set

    NOTES

    TODO: This exists in OpenMM; check with John on how best to get snapshots.
    This routine generates a velocity Verlet trajectory for systems without
    constraints by wrapping the OpenMM 'VerletIntegrator' in two half-kicks of
    the velocity.
    """
    # Set initial positions.
    self.context.setPositions(x0)

    # Store each trajectory segment in a Trajectory.
    trajectory = Trajectory()

    # Construct mass vector (per-particle masses broadcast over x, y, z).
    nparticles = self.system.getNumParticles()
    mass = Quantity(numpy.zeros([nparticles, 3], numpy.float64), amu)
    for particle_index in range(nparticles):
        mass[particle_index, :] = self.system.getParticleMass(particle_index)

    # Assign velocities from the Maxwell-Boltzmann distribution.
    self.context.setVelocitiesToTemperature(self.temperature)

    # Store the initial snapshot of the trajectory.
    snapshot = Snapshot(context=self.context)
    trajectory.forward(snapshot)

    # Propagate dynamics by velocity Verlet until a core set is reached or
    # nframes segments have been generated.
    in_void = True
    self.frame = 0
    while self.frame < nframes and in_void:
        # Integrate one segment of dynamics. The number of integrator steps
        # per stored frame (nsteps_per_frame) is assumed to be set elsewhere
        # on this object.
        self.integrator.step(self.nsteps_per_frame)
        self.frame += 1

        # Check whether we have reached a core set.
        in_void = self.check_void()

        # Store a snapshot of the current frame.
        snapshot = Snapshot(context=self.context)
        trajectory.forward(snapshot)

    return trajectory
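# A minimal sketch (not part of the original class) of the "two half-kicks"
# velocity update mentioned in the docstring above, using only the documented
# OpenMM Context/State API. The `context`, `mass`, and `timestep` arguments are
# assumed to correspond to the attributes used in generateTrajectory.
def _half_kick(context, mass, timestep):
    # Advance velocities by half a timestep using the current forces:
    # v <- v + (dt/2) * F / m. Unit bookkeeping is handled by Quantity
    # arithmetic (forces in kJ/mol/nm, masses in amu, timestep in ps).
    state = context.getState(getVelocities=True, getForces=True)
    velocities = state.getVelocities(asNumpy=True)
    forces = state.getForces(asNumpy=True)
    context.setVelocities(velocities + 0.5 * timestep * forces / mass)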
def split_into_connections(self, traj):
    """
    Split a trajectory into the sub-trajectories that connect one core set to
    another, each consisting of a starting core frame, the void frames in
    between, and the final core frame.
    """
    c, i, d = self.assign_trajectory(traj)
    l_traj = []
    traj_mode = 0  # 0: waiting in a core set, 1: collecting void frames
    t = Trajectory()
    first = None
    for index, snapshot in enumerate(traj):
        if c[index] != -1 and i[index] == 0 and traj_mode == 0:
            # In a core set: remember the most recent core frame.
            first = snapshot
        elif i[index] != 0 and first is not None:
            # In the void: accumulate frames.
            t.forward(snapshot)
            traj_mode = 1
        elif c[index] != -1 and i[index] == 0 and traj_mode == 1:
            # Re-entered a core set: close off this connection.
            t.insert(0, first)
            t.forward(snapshot)
            l_traj.append(t)
            t = Trajectory()
            first = snapshot
            traj_mode = 0
    return l_traj
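# Illustrative usage (hypothetical helper, not in the original module):
# decompose a freshly generated trajectory into core-to-core connections and
# report their lengths. Assumes `simulator` is an instance of the class above
# with generateTrajectory() and assign_trajectory() available.
def _print_connection_summary(simulator, traj):
    connections = simulator.split_into_connections(traj)
    for index, sub in enumerate(connections):
        print("connection %d: %d frames" % (index, len(sub)))
    return connections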
def _resume_from_netcdf(self):
    """
    Resume execution by reading current positions and energies from a NetCDF
    file.
    """
    # Open NetCDF file for reading.
    # ncfile = netcdf.NetCDFFile(self.store_filename, 'r')  # for Scientific.IO.NetCDF
    ncfile = netcdf.Dataset(self.store_filename, "r")  # for netCDF4

    # TODO: Perform sanity check on file before resuming.

    # Get current dimensions.
    self.iteration = ncfile.variables["activities"].shape[0] - 1
    self.nstates = ncfile.variables["activities"].shape[1]
    self.n_frames = ncfile.variables["trajectory_coordinates"].shape[1]
    self.natoms = ncfile.variables["trajectory_coordinates"].shape[2]
    print("iteration = %d, nstates = %d, natoms = %d" % (self.iteration, self.nstates, self.natoms))

    # Restore trajectories.
    self.trajectories = list()
    for replica_index in range(self.nstates):
        trajectory = Trajectory()
        for frame_index in range(self.n_frames):
            x = (
                ncfile.variables["trajectory_coordinates"][replica_index, frame_index, :, :]
                .astype(numpy.float32)
                .copy()
            )
            coordinates = Quantity(x, nanometers)
            v = (
                ncfile.variables["trajectory_velocities"][replica_index, frame_index, :, :]
                .astype(numpy.float32)
                .copy()
            )
            velocities = Quantity(v, nanometers / picoseconds)
            V = ncfile.variables["trajectory_potential"][replica_index, frame_index]
            potential_energy = Quantity(V, kilojoules_per_mole)
            T = ncfile.variables["trajectory_kinetic"][replica_index, frame_index]
            kinetic_energy = Quantity(T, kilojoules_per_mole)
            snapshot = Snapshot(
                coordinates=coordinates,
                velocities=velocities,
                kinetic_energy=kinetic_energy,
                potential_energy=potential_energy,
            )
            trajectory.forward(snapshot)
        self.trajectories.append(trajectory)

    # Restore state information.
    self.replica_states = ncfile.variables["states"][self.iteration, :].copy()

    # Restore log probabilities.
    print("Reading log probabilities...")  # DEBUG
    print(self.log_P_kl)  # DEBUG
    self.log_P_kl = ncfile.variables["log_probabilities"][self.iteration, :, :]
    print(self.log_P_kl)  # DEBUG

    # Restore activities.
    for replica_index in range(self.nstates):
        state_index = self.replica_states[replica_index]
        K_reduced_unit = self.ensembles[state_index].K_reduced_unit
        K = ncfile.variables["activities"][self.iteration, replica_index]
        self.activities[replica_index] = K * K_reduced_unit

    # Restore path Hamiltonians.
    for replica_index in range(self.nstates):
        state_index = self.replica_states[replica_index]
        H_reduced_unit = self.ensembles[state_index].H_reduced_unit
        H = ncfile.variables["path_hamiltonians"][self.iteration, replica_index]
        self.path_hamiltonians[replica_index] = H * H_reduced_unit

    # Close NetCDF file.
    ncfile.close()

    # Reopen NetCDF file for appending, and maintain handle.
    # self.ncfile = netcdf.NetCDFFile(self.store_filename, 'a')  # for Scientific.IO.NetCDF
    self.ncfile = netcdf.Dataset(self.store_filename, "a")  # for netCDF4

    # DEBUG: Set number of iterations to be a bit more than we've done.
    # self.number_of_iterations = self.iteration + 50

    return
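# Illustrative helper (hypothetical, not in the original module): inspect the
# store file layout that _resume_from_netcdf expects, using the standard
# netCDF4 API. It prints the shape of each variable the resume routine reads,
# e.g. activities as (iteration, replica) and trajectory_coordinates as
# (replica, frame, atom, 3).
def _inspect_store(store_filename):
    ncfile = netcdf.Dataset(store_filename, "r")
    for name in ("activities", "trajectory_coordinates", "trajectory_velocities",
                 "trajectory_potential", "trajectory_kinetic", "states",
                 "log_probabilities", "path_hamiltonians"):
        if name in ncfile.variables:
            print("%s: %s" % (name, ncfile.variables[name].shape))
    ncfile.close()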