Example 1
    def __init__(
        self,
        calculator,
        nprocs: int = 1,
        residual_fn: Optional[Callable] = None,
        residual_data: Optional[Dict[str, Any]] = None,
    ):

        if not torch_avail:
            report_import_error("pytorch")

        default_residual_data = {
            "energy_weight": 1.0,
            "forces_weight": 1.0,
            "stress_weight": 1.0,
            "normalize_by_natoms": True,
        }

        residual_data = _check_residual_data(residual_data, default_residual_data)
        _check_compute_flag(calculator, residual_data)

        self.calculator = calculator
        self.nprocs = nprocs

        self.residual_fn = (
            energy_forces_residual if residual_fn is None else residual_fn
        )
        self.residual_data = residual_data

        self.optimizer = None
        self.optimizer_state_path = None

        logger.debug(f"`{self.__class__.__name__}` instantiated.")
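
A minimal usage sketch (hypothetical: it assumes a kliff-style calculator already wired to a model and a dataset; the names and defaults follow the snippet above, not a verified public API):

# Hypothetical usage; `calculator` is assumed to be configured elsewhere.
loss = Loss(
    calculator,
    nprocs=2,  # parallelize residual evaluation over two processes
    residual_data={"forces_weight": 10.0},  # merged over the 1.0 default above
)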
Example 2
    def __init__(
        self,
        model_name: str,
        params_relation_callback: Optional[Callable] = None,
    ):
        if not kimpy_avail:
            report_import_error("kimpy", self.__class__.__name__)

        self.kim_model = self._create_kim_model(model_name)

        super().__init__(model_name, params_relation_callback)
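
As a hedged illustration, instantiation needs only the name of an installed KIM model (the Stillinger-Weber identifier below is illustrative; any portable model installed through the kim-api would do):

# Illustrative only: assumes this model is installed via the kim-api collection.
model = KIMModel("SW_StillingerWeber_1985_Si__MO_405512056662_006")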
Example 3
    def __init__(
        self,
        kim_ca,
        config: Configuration,
        supported_species: Dict[str, int],
        influence_distance: float,
        compute_energy: bool = True,
        compute_forces: bool = True,
        compute_stress: bool = False,
    ):
        if not kimpy_avail:
            report_import_error("kimpy", self.__class__.__name__)

        # kim compute argument
        self.kim_ca = kim_ca

        # get supported property
        self._get_implemented_property()

        super().__init__(
            config,
            supported_species,
            influence_distance,
            compute_energy,
            compute_forces,
            compute_stress,
        )

        # neighbor list
        self.neigh = None
        self.num_contributing_particles = None
        self.num_padding_particles = None
        self.padding_image_of = None

        # model input
        self.num_particles = None
        self.species_code = None
        self.particle_contributing = None
        self.coords = None

        # model output
        self.energy = None
        self.forces = None

        self._init_neigh()
        self._update_neigh(influence_distance)
        self._register_data(compute_energy, compute_forces)
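
A sketch of how such an object might be constructed for one configuration (hedged: `kim_ca` would come from kimpy's compute-arguments machinery, `config` from a dataset, and the numeric values are placeholders):

# Sketch only; kim_ca and config are assumed to exist, values are placeholders.
ca = KIMComputeArguments(
    kim_ca,
    config,                        # a Configuration: coords, species, cell
    supported_species={"Si": 0},   # species symbol -> model integer code
    influence_distance=3.77,       # neighbor-list cutoff, in angstroms
    compute_energy=True,
    compute_forces=True,
    compute_stress=False,
)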
Example 4
    def _scipy_optimize(self, method, **kwargs):
        """
        Minimize the loss using ``scipy.optimize.least_squares`` or
        ``scipy.optimize.minimize``. A user should not call this function directly,
        but should instead call the ``minimize`` method.
        """

        size = parallel.get_MPI_world_size()

        if size > 1:
            comm = MPI.COMM_WORLD
            rank = comm.Get_rank()
            logger.info(f"Running in MPI mode with {size} processes.")

            if self.nprocs > 1:
                logger.warning(
                    f"Argument `nprocs = {self.nprocs}` provided at initialization is "
                    f"ignored. When running in MPI mode, the number of processes "
                    f"provided along with the `mpiexec` (or `mpirun`) command is used."
                )

            x = self.calculator.get_opt_params()
            if method in self.scipy_least_squares_methods:
                # geodesic LM
                if method == "geodesiclm":
                    if not geodesicLM_avail:
                        report_import_error("geodesiclm")
                    else:
                        minimize_fn = geodesiclm
                else:
                    minimize_fn = scipy.optimize.least_squares
                func = self._get_residual_MPI

            elif method in self.scipy_minimize_methods:
                minimize_fn = scipy.optimize.minimize
                func = self._get_loss_MPI

            if rank == 0:
                result = minimize_fn(func, x, method=method, **kwargs)
                # notify the other processes to break out of func
                break_flag = True
                for i in range(1, size):
                    comm.send(break_flag, dest=i, tag=i)
            else:
                func(x)
                result = None

            result = comm.bcast(result, root=0)

            return result

        else:
            # 1. running MPI with 1 process
            # 2. running without MPI at all
            # both cases are regarded as running without MPI

            if self.nprocs == 1:
                logger.info("Running in serial mode.")
            else:
                logger.info(
                    f"Running in multiprocessing mode with {self.nprocs} processes."
                )

                # The user may think MPI is in use because nprocs was set
                if mpi4py_avail:
                    logger.warning(
                        "`mpi4py` detected. If you intend to run in MPI mode, you "
                        "should execute your code via `mpiexec` (or `mpirun`). If "
                        "not, ignore this message."
                    )

            x = self.calculator.get_opt_params()
            if method in self.scipy_least_squares_methods:
                if method == "geodesiclm":
                    if not geodesicLM_avail:
                        report_import_error("geodesiclm")
                    else:
                        minimize_fn = geodesiclm
                else:
                    minimize_fn = scipy.optimize.least_squares

                func = self._get_residual
            elif method in self.scipy_minimize_methods:
                minimize_fn = scipy.optimize.minimize
                func = self._get_loss

            result = minimize_fn(func, x, method=method, **kwargs)
            return result
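
Per the docstring, users should go through ``minimize`` instead. A hedged sketch of that entry point (assuming ``loss`` is the object from Example 1 and that extra keyword arguments are forwarded to the underlying scipy routine):

# Hedged sketch: `minimize` is assumed to dispatch to _scipy_optimize.
result = loss.minimize(method="L-BFGS-B", options={"maxiter": 100})
print(result.fun)  # final loss value from the scipy OptimizeResult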
Example 5
    def _scipy_optimize(self, method, **kwargs):
        """
        Minimize the loss using ``scipy.optimize.least_squares`` or
        ``scipy.optimize.minimize``. A user should not call this function directly,
        but should instead call the ``minimize`` method.
        """

        size = parallel.get_MPI_world_size()

        if size > 1:
            comm = MPI.COMM_WORLD
            rank = comm.Get_rank()

            msg = "Running in MPI mode with {} processes.".format(size)
            log_entry(logger, msg, level="info", print_end="\n\n")

            if self.nprocs > 1:
                msg = (
                    'Argument "nprocs = {}" provided at initialization is ignored. When '
                    "running in MPI mode, the number of processes provided along with "
                    'the "mpiexec" (or "mpirun") command is used.'.format(
                        self.nprocs))
                log_entry(logger, msg, level="warning")

            x = self.calculator.get_opt_params()
            if method in self.scipy_least_squares_methods:
                # geodesic LM
                if method == "geodesiclm":
                    if not geodesicLM_avail:
                        report_import_error("geodesciLM")
                    else:
                        minimize_fn = geodesiclm
                else:
                    minimize_fn = scipy.optimize.least_squares
                func = self._get_residual_MPI

            elif method in self.scipy_minimize_methods:
                minimize_fn = scipy.optimize.minimize
                func = self._get_loss_MPI

            if rank == 0:
                result = minimize_fn(func, x, method=method, **kwargs)
                # notify the other processes to break out of func
                break_flag = True
                for i in range(1, size):
                    comm.send(break_flag, dest=i, tag=i)
            else:
                func(x)
                result = None

            result = comm.bcast(result, root=0)

            return result

        else:
            # 1. running MPI with 1 process
            # 2. running without MPI at all
            # both cases are regarded as running without MPI

            if self.nprocs == 1:
                msg = "Running in serial mode."
                log_entry(logger, msg, level="info", print_end="\n\n")
            else:
                msg = "Running in multiprocessing mode with {} processes.".format(
                    self.nprocs)
                log_entry(logger, msg, level="info", print_end="\n\n")

                # The user may think MPI is in use because nprocs was set
                if mpi4py_avail:
                    msg = (
                        '"mpi4py" detected. If you intend to run in MPI mode, you '
                        'should execute your code via "mpiexec" (or "mpirun"). If '
                        "not, ignore this message.")
                    log_entry(logger, msg, level="warning")

            x = self.calculator.get_opt_params()
            if method in self.scipy_least_squares_methods:
                if method == "geodesiclm":
                    if not geodesicLM_avail:
                        report_import_error("geodesiclm")
                    else:
                        minimize_fn = geodesiclm
                else:
                    minimize_fn = scipy.optimize.least_squares
                func = self._get_residual
            elif method in self.scipy_minimize_methods:
                minimize_fn = scipy.optimize.minimize
                func = self._get_loss

            result = minimize_fn(func, x, method=method, **kwargs)
            return result
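
The rank-0/worker handshake above is easy to miss in the surrounding plumbing. Below is a self-contained schematic of the same coordination pattern; it is not the kliff implementation (it broadcasts the break flag instead of using the point-to-point sends above, and `partial_loss` is a made-up stand-in for each rank's share of the loss):

# Schematic only; run with e.g. `mpiexec -n 4 python demo.py`.
import numpy as np
import scipy.optimize
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

def partial_loss(x):
    # Made-up stand-in for the share of the loss computed on this rank.
    return float(np.sum((x - rank) ** 2)) / size

def loss(x):
    # Rank 0 tells the workers to keep going, ships them the current
    # parameters, and sums every rank's contribution back onto itself.
    comm.bcast(False, root=0)  # break_flag = False: one more evaluation
    x = comm.bcast(x, root=0)
    return comm.reduce(partial_loss(x), op=MPI.SUM, root=0)

if rank == 0:
    result = scipy.optimize.minimize(loss, np.zeros(3), method="L-BFGS-B")
    comm.bcast(True, root=0)  # break_flag = True: release the workers
else:
    result = None
    while not comm.bcast(None, root=0):  # wait on rank 0's break flag
        x = comm.bcast(None, root=0)
        comm.reduce(partial_loss(x), op=MPI.SUM, root=0)

result = comm.bcast(result, root=0)  # every rank ends with the final result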