Example #1
0
    def trial(self, state: interfaces.IState,
              runner: interfaces.IRunner) -> Tuple[interfaces.IState, bool]:
        """
        Perform a single Metropolis translation trial.

        One Gaussian random displacement (scale ``self.move_size``) is
        applied to every atom in ``self.atom_indices``; the move is kept
        or rolled back according to the Metropolis criterion.

        Args:
            state: initial state of the system
            runner: runner used to evaluate energies

        Returns:
            tuple of the (possibly updated) state and whether the trial
            move was accepted
        """
        old_positions = state.positions.copy()
        old_energy = state.energy

        # Displace the selected atoms by one shared random vector.
        displacement = np.random.normal(loc=0.0, scale=self.move_size, size=3)
        new_positions = old_positions.copy()
        new_positions[self.atom_indices, :] += displacement

        state.positions = new_positions
        new_energy = runner.get_energy(state)

        accepted = _metropolis(old_energy, new_energy, bias=0.0)
        if accepted:
            state.energy = new_energy
            state.positions = new_positions
        else:
            # Rejected: restore the starting configuration.
            state.energy = old_energy
            state.positions = old_positions

        return state, accepted
Example #2
0
    def trial(self, state: interfaces.IState,
              runner: interfaces.IRunner) -> Tuple[interfaces.IState, bool]:
        """
        Perform a single Metropolis rotation trial.

        The atoms in ``self.atom_indices`` are rotated by a uniformly
        sampled angle about the axis through atoms ``self.index1`` and
        ``self.index2``; the move is kept or rolled back according to
        the Metropolis criterion.

        Args:
            state: initial state of the system
            runner: runner used to evaluate energies

        Returns:
            tuple of the (possibly updated) state and whether the trial
            move was accepted
        """
        old_positions = state.positions.copy()
        old_energy = state.energy

        # Rotate only the selected atoms about the index1 -> index2 axis.
        rotation_angle = _generate_uniform_angle()
        new_positions = old_positions.copy()
        new_positions[self.atom_indices, :] = _rotate_around_vector(
            old_positions[self.index1, :],
            old_positions[self.index2, :],
            rotation_angle,
            old_positions[self.atom_indices, :],
        )

        state.positions = new_positions
        new_energy = runner.get_energy(state)

        accepted = _metropolis(old_energy, new_energy, 0.0)
        if accepted:
            state.energy = new_energy
            state.positions = new_positions
        else:
            # Rejected: restore the starting configuration.
            state.energy = old_energy
            state.positions = old_positions

        return state, accepted
Example #3
0
    def _permute_states(
        permutation_matrix: List[int],
        states: Sequence[interfaces.IState],
        system_runner: interfaces.IRunner,
    ) -> Sequence[interfaces.IState]:
        old_coords = [s.positions for s in states]
        old_velocities = [s.velocities for s in states]
        old_box_vectors = [s.box_vector for s in states]
        old_energy = [s.energy for s in states]
        old_params = [s.parameters for s in states]
        old_mappings = [s.mappings for s in states]
        assert system_runner.temperature_scaler is not None
        temperatures = [
            system_runner.temperature_scaler(s.alpha) for s in states
        ]

        for i, index in enumerate(permutation_matrix):
            states[i].positions = old_coords[index]
            states[i].velocities = (
                math.sqrt(temperatures[i] / temperatures[index]) *
                old_velocities[index])
            states[i].box_vector = old_box_vectors[index]
            states[i].energy = old_energy[index]
            states[i].parameters = old_params[index]
            states[i].mappings = old_mappings[index]
        return states
Example #4
0
    def run(self, communicator: interfaces.ICommunicator,
            system_runner: interfaces.IRunner) -> None:
        """
        Keep running worker jobs until all steps are done.

        Args:
            communicator: a communicator object for talking to the leader
            system_runner: a system runner object for actually running the simulations
        """
        # The very first stage (on startup or after a restart) is always
        # preceded by a minimization.
        needs_minimization = True
        while self._step <= self._max_steps:
            # get this step's state and alpha from the leader
            state = communicator.receive_state_from_leader()
            new_alpha = communicator.receive_alpha_from_leader()
            state.alpha = new_alpha

            system_runner.prepare_for_timestep(state, new_alpha, self._step)

            # run one round of simulation
            if needs_minimization:
                state = system_runner.minimize_then_run(state)
                needs_minimization = False  # only needed once
            else:
                state = system_runner.run(state)

            # swap states with the other replicas, then report our energy
            # for each of them back to the leader
            all_states = communicator.exchange_states_for_energy_calc(state)
            energies = [system_runner.get_energy(s) for s in all_states]
            communicator.send_energies_to_leader(energies)

            self._step += 1
Example #5
0
    def run(
        self,
        communicator: interfaces.ICommunicator,
        system_runner: interfaces.IRunner,
        store: vault.DataStore,
    ):
        """
        Run replica exchange until finished.

        Drives the leader side of the replica-exchange loop: broadcast
        states and alphas, run one round of dynamics, gather energies,
        permute replicas, and persist everything to the store each step.

        Args:
            communicator: a communicator object to talk with workers
            system_runner: an interfaces.IRunner object to run the simulations
            store: a store object to handle storing data to disk
        """
        logger.info("Beginning replica exchange")
        # check to make sure n_replicas matches across all three components
        assert self._n_replicas == communicator.n_replicas
        assert self._n_replicas == store.n_replicas

        # load previous state from the store
        states = store.load_states(stage=self.step - 1)

        # we always minimize when we first start, either on the first
        # stage or the first stage after a restart
        minimize = True

        while self._step <= self._max_steps:
            logger.info("Running replica exchange step %d of %d.", self._step,
                        self._max_steps)

            # communicate state; the leader keeps one state for itself
            # and distributes the rest to the workers
            my_state = communicator.broadcast_states_to_workers(states)

            # update alphas
            # NOTE(review): the leader prepares with alpha=0.0 — presumably
            # the leader always owns replica 0; confirm against
            # broadcast_states_to_workers.
            system_runner.prepare_for_timestep(my_state, 0.0, self._step)
            self._alphas = self.adaptor.adapt(self._alphas, self._step)
            communicator.broadcast_alphas_to_workers(self._alphas)

            # do one step
            if minimize:
                logger.info("First step, minimizing and then running.")
                my_state = system_runner.minimize_then_run(my_state)
                minimize = False  # we don't need to minimize again
            else:
                logger.info("Running molecular dynamics.")
                my_state = system_runner.run(my_state)

            # gather all of the states
            states = communicator.exchange_states_for_energy_calc(my_state)

            # compute our energy for each state, then collect the full
            # (replica x state) energy matrix from all workers
            my_energies = self._compute_energies(states, system_runner)
            energies = communicator.gather_energies_from_workers(my_energies)

            # ask the ladder how to permute things
            permutation_vector = self.ladder.compute_exchanges(
                energies, self.adaptor)
            states = self._permute_states(permutation_vector, states,
                                          system_runner)

            # store everything for this step
            store.save_states(states, self.step)
            # NOTE(review): only states[0] goes into the trajectory —
            # presumably the lowest-index replica; verify intent.
            store.append_traj(states[0], self.step)
            store.save_alphas(np.array(self._alphas), self.step)
            store.save_permutation_vector(permutation_vector, self.step)
            store.save_energy_matrix(energies, self.step)
            store.save_acceptance_probabilities(
                self.adaptor.get_acceptance_probabilities(), self.step)
            store.save_data_store()

            # on to the next step! save the runner itself so a restart
            # resumes from the step after the one just completed
            self._step += 1
            store.save_remd_runner(self)
            store.backup(self.step - 1)
        logger.info("Finished %d steps of replica exchange successfully.",
                    self._max_steps)
Example #6
0
 def _compute_energies(states: Sequence[interfaces.IState],
                       system_runner: interfaces.IRunner) -> List[float]:
     my_energies = []
     for state in states:
         my_energies.append(system_runner.get_energy(state))
     return my_energies