Example #1
    @property
    def energy(self):
        # exposed as a property: sample() (Example #2) reads it as an
        # attribute, `current_energy = self.energy`
        xyz = utils.get_simulation_xyz(self._simulation)
        snapshot = mdtraj.Trajectory(xyz, self.topology)
        # prior (force-field) energy from OpenMM; ._value strips the unit
        # wrapper from the returned Quantity
        self._prior_energy = self._simulation.context.getState(
            getEnergy=True).getPotentialEnergy()._value
        # energy of the learned/target potential, evaluated on the snapshot
        self._potential_energy = self.potential(snapshot)
        self._total_energy = self._potential_energy + self._prior_energy
        return self._total_energy
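The helper `utils.get_simulation_xyz` is not shown in the example. A minimal sketch of what it plausibly does, assuming an OpenMM `Simulation` and coordinates in nanometers (the unit `mdtraj.Trajectory` expects); the function body and frame handling here are assumptions, not the project's actual implementation:

from simtk import unit  # `from openmm import unit` on newer OpenMM versions


def get_simulation_xyz(simulation):
    # Assumed sketch: pull the current positions from the OpenMM context as a
    # numpy array in nanometers and add a leading frame axis so the result can
    # be handed directly to mdtraj.Trajectory.
    state = simulation.context.getState(getPositions=True)
    xyz = state.getPositions(asNumpy=True).value_in_unit(unit.nanometer)
    return xyz.reshape(1, -1, 3)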
Example #2
    def sample(self, num_moves, output_target):
        """
        Run the sampler for `num_moves`.
        
        Parameteres
        -----------
        num_moves : int
            The number of moves to generate.
            
        output_target : file
            A file object that serialized results will be sent to. Could be
            a pipe or something similar.
            
        Returns
        -------
        sample : mdtraj.trajectory
            An `num_moves` length trajectory, sampled from the potential
            assoicated with the 
        """
        
        # stream the trajectory data to a single HDF5 file via a reporter so
        # it never has to be held in memory all at once
        reporter = reporters.HDF5Reporter(output_target, 
                                          self.mc_length_increment, 
                                          coordinates=True, time=False, cell=False,
                                          potentialEnergy=False, kineticEnergy=False,
                                          temperature=False, velocities=False,
                                          atomSubset=None)
        self._simulation.reporters.append(reporter)

        # perform the actual Monte Carlo; `energy` is the property from
        # Example #1 (prior force-field energy plus the learned potential)
        current_energy = self.energy
        
        logger.debug('PRIOR     POTNT     NEW       CRRNT     ACCPT     RATIO')
        for mi in range(num_moves):
            
            # step forward in time
            self.total_moves_attempted += 1
            old_pos = self._simulation.context.getState(getPositions=True).getPositions()
            self._simulation.step(self.steps_per_iter)

            # accept or reject according to Metropolis
            new_energy = self.energy

            # accept
            if (new_energy < current_energy):
                accept_str = 'Y-lwr' # move accepted, new energy lower
                self.accepted += 1
                self.positions = utils.get_simulation_xyz(self._simulation)
                current_energy = new_energy

            elif np.random.rand() < np.exp(
                    (current_energy - new_energy) / (k_boltz * self.temperature)):
                accept_str = 'Y-rnd' # move accepted due to random chance
                self.accepted += 1
                self.positions = utils.get_simulation_xyz(self._simulation)
                current_energy = new_energy
                
            # reject
            else:
                accept_str = 'N'
                self._simulation.context.setPositions(old_pos)

                
            # adaptively scale the number of MD steps per MC attempt to steer
            # the acceptance ratio toward `target_accept_percent`
            self.accepted_ratio = float(self.accepted) / float(self.total_moves_attempted)
            if self.accepted_ratio > self.target_accept_percent:
                self.steps_per_iter /= self.scaling_speed
            else:
                self.steps_per_iter *= self.scaling_speed
            self.steps_per_iter = int(self.steps_per_iter)
                
            self.steps_per_iter -= self.steps_per_iter % self.mc_length_increment
            self.steps_per_iter = max(self.steps_per_iter, self.mc_length_increment)

            report_str = '%.2e  ' * 4 + '%s  %.2f'
            logger.debug(report_str % (self._prior_energy,
                                       self._potential_energy,
                                       new_energy,
                                       current_energy,
                                       accept_str,
                                       self.accepted_ratio))
        
        # close the reporter
        reporter.close()

        return
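The accept/reject branch inside the loop is the standard Metropolis criterion. Below is a self-contained sketch of that rule pulled out of the sampler for clarity; the Boltzmann constant shown assumes energies in kcal/mol, which is an assumption here, so it should match whatever units `self.potential` and the OpenMM prior actually report:

import numpy as np

# Boltzmann constant in kcal/(mol K); an assumed unit system -- use the same
# `k_boltz` the sampler uses so the energies and kT stay consistent.
K_BOLTZ = 0.0019872041


def metropolis_accept(current_energy, new_energy, temperature, rng=np.random):
    """Return True if a move from `current_energy` to `new_energy` is accepted.

    Downhill moves are always accepted; uphill moves are accepted with
    probability exp(-(new - current) / kT), exactly as in `sample` above.
    """
    if new_energy < current_energy:
        return True
    return rng.rand() < np.exp(
        (current_energy - new_energy) / (K_BOLTZ * temperature))

Note that the explicit downhill branch is redundant with the exponential test (the exponential exceeds 1 for downhill moves), but keeping it, as the sampler does, avoids a needless `np.exp` call and a random draw on moves that are always accepted.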