Example 1
    def trial(self, state: interfaces.IState,
              runner: interfaces.IRunner) -> Tuple[interfaces.IState, bool]:
        """
        Perform a Metropolis trial.

        Args:
            state: initial state of the system
            runner: runner used to evaluate energies

        Returns:
            a tuple of the state after the trial and whether the move was accepted
        """
        starting_positions = state.positions.copy()
        starting_energy = state.energy

        # propose a new configuration: rotate the selected atoms by a random
        # angle about the axis through atoms index1 and index2
        angle = _generate_uniform_angle()
        trial_positions = starting_positions.copy()
        trial_positions[self.atom_indices, :] = _rotate_around_vector(
            starting_positions[self.index1, :],
            starting_positions[self.index2, :],
            angle,
            starting_positions[self.atom_indices, :],
        )
        state.positions = trial_positions
        trial_energy = runner.get_energy(state)

        # accept or reject with the Metropolis criterion; on rejection, restore
        # the original positions and energy
        accepted = _metropolis(starting_energy, trial_energy, 0.0)
        if accepted:
            state.energy = trial_energy
            state.positions = trial_positions
        else:
            state.energy = starting_energy
            state.positions = starting_positions

        return state, accepted
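
The snippet relies on three module-level helpers that are not shown. The following is a minimal sketch of what they might look like; the names match the calls above, but the bodies are assumptions (in particular, energies are assumed to be in units of kT, so no explicit beta factor appears).

# Hypothetical sketch of the helpers used above; not the library's actual code.
import math
import random

import numpy as np


def _generate_uniform_angle() -> float:
    # Assumed convention: rotation angle drawn uniformly from [0, 360) degrees.
    return 360.0 * random.random()


def _rotate_around_vector(p1: np.ndarray, p2: np.ndarray,
                          angle: float, points: np.ndarray) -> np.ndarray:
    # Assumed: rotate each row of `points` by `angle` degrees about the axis
    # through p1 and p2, using Rodrigues' rotation formula.
    axis = (p2 - p1) / np.linalg.norm(p2 - p1)
    theta = math.radians(angle)
    shifted = points - p1
    rotated = (shifted * math.cos(theta)
               + np.cross(axis, shifted) * math.sin(theta)
               + np.outer(shifted @ axis, axis) * (1.0 - math.cos(theta)))
    return rotated + p1


def _metropolis(current_energy: float, trial_energy: float, bias: float) -> bool:
    # Assumed: standard Metropolis criterion with energies in units of kT; the
    # bias term is added to the energy difference before the test.
    delta = trial_energy - current_energy + bias
    if delta <= 0.0:
        return True
    return random.random() < math.exp(-delta)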
Example 2
    def trial(self, state: interfaces.IState,
              runner: interfaces.IRunner) -> Tuple[interfaces.IState, bool]:
        """
        Perform a Metropolis trial.

        Args:
            state: initial state of the system
            runner: runner used to evaluate energies

        Returns:
            a tuple of the state after the trial and whether the move was accepted
        """
        starting_positions = state.positions.copy()
        starting_energy = state.energy

        # propose a new configuration: rigidly translate the selected atoms by a
        # single Gaussian random displacement with standard deviation move_size
        trial_positions = starting_positions.copy()
        random_vector = np.random.normal(loc=0.0, scale=self.move_size, size=3)
        trial_positions[self.atom_indices, :] += random_vector
        state.positions = trial_positions
        trial_energy = runner.get_energy(state)

        # accept or reject with the Metropolis criterion; on rejection, restore
        # the original positions and energy
        accepted = _metropolis(starting_energy, trial_energy, bias=0.0)
        if accepted:
            state.energy = trial_energy
            state.positions = trial_positions
        else:
            state.energy = starting_energy
            state.positions = starting_positions

        return state, accepted
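
Unlike the rotation move in Example 1, this mover proposes a rigid translation of the selected atoms by a single Gaussian random vector; because the proposal is symmetric, no bias correction is needed in the acceptance test (hence bias=0.0). Below is a standalone illustration of the proposal step, using toy values for the attributes referenced in the snippet.

# Toy illustration of the translation proposal; values are placeholders.
import numpy as np

positions = np.zeros((4, 3))    # four atoms at the origin
atom_indices = [1, 2]           # subset of atoms to move together
move_size = 0.1                 # assumed: standard deviation of the displacement

# One shared Gaussian displacement vector is applied to every selected atom,
# so the group moves rigidly.
displacement = np.random.normal(loc=0.0, scale=move_size, size=3)
trial = positions.copy()
trial[atom_indices, :] += displacement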
Example 3
    def run(self, communicator: interfaces.ICommunicator,
            system_runner: interfaces.IRunner) -> None:
        """
        Continue running worker jobs until done.

        Args:
            communicator: a communicator object for talking to the leader
            system_runner: a system runner object for actually running the simulations
        """
        # we always minimize when we first start, either on the first
        # stage or the first stage after a restart
        minimize = True
        while self._step <= self._max_steps:
            # update simulation conditions
            state = communicator.receive_state_from_leader()
            new_alpha = communicator.receive_alpha_from_leader()

            state.alpha = new_alpha

            system_runner.prepare_for_timestep(state, new_alpha, self._step)

            # do one round of simulation
            if minimize:
                state = system_runner.minimize_then_run(state)
                minimize = False  # we don't need to minimize again
            else:
                state = system_runner.run(state)

            # compute energies
            states = communicator.exchange_states_for_energy_calc(state)

            energies = []
            for state in states:
                energy = system_runner.get_energy(state)
                energies.append(energy)
            communicator.send_energies_to_leader(energies)

            self._step += 1
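
Each worker evaluates every exchanged state under its own conditions (its alpha), so the lists sent via send_energies_to_leader stack into an N x N energy matrix on the leader. The leader-side logic is not shown here; the following is only a sketch of the usual Hamiltonian replica-exchange bookkeeping, with placeholder values and energies assumed to be in units of kT.

# Hypothetical leader-side view of the energies returned by N workers.
import math
import random

import numpy as np

n_replicas = 4
# energy_matrix[i, j]: energy of state j evaluated under replica i's conditions,
# i.e. row i is the list sent by worker i via send_energies_to_leader.
energy_matrix = np.random.rand(n_replicas, n_replicas)  # placeholder values


def swap_accepted(i: int, j: int) -> bool:
    # Standard exchange criterion for swapping the states of replicas i and j.
    delta = (energy_matrix[i, j] + energy_matrix[j, i]
             - energy_matrix[i, i] - energy_matrix[j, j])
    return delta <= 0.0 or random.random() < math.exp(-delta)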
Example 4
def _compute_energies(states: Sequence[interfaces.IState],
                      system_runner: interfaces.IRunner) -> List[float]:
    """Evaluate each state with this worker's runner and collect the energies."""
    my_energies = []
    for state in states:
        my_energies.append(system_runner.get_energy(state))
    return my_energies
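
For reference, the same helper could be written as a list comprehension; this is only a stylistic alternative, not the project's code.

def _compute_energies(states: Sequence[interfaces.IState],
                      system_runner: interfaces.IRunner) -> List[float]:
    # Equivalent one-liner: evaluate each state in order and collect the results.
    return [system_runner.get_energy(state) for state in states]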