Example 1
    def backward_arguments(kwargs, new_args={}):
        """
        Update initialization arguments that were renamed: deprecated
        keyword arguments are popped from kwargs and copied into new_args
        under their new names.
        """
        # warnings.warn is used so the deprecation messages are actually
        # emitted (a bare DeprecationWarning(...) constructs the exception
        # without raising or reporting it); `import warnings` is assumed at
        # module level.
        if "kernel_name" in kwargs:
            warnings.warn("kernel_name is being replaced with kernels",
                          DeprecationWarning)
            new_args["kernels"] = kernel_str_to_array(kwargs["kernel_name"])
            kwargs.pop("kernel_name")
        if "nsample" in kwargs:
            warnings.warn("nsample is being replaced with n_sample",
                          DeprecationWarning)
            new_args["n_sample"] = kwargs["nsample"]
            kwargs.pop("nsample")
        if "par" in kwargs:
            warnings.warn("par is being replaced with parallel",
                          DeprecationWarning)
            new_args["parallel"] = kwargs["par"]
            kwargs.pop("par")
        if "no_cpus" in kwargs:
            warnings.warn("no_cpus is being replaced with n_cpus",
                          DeprecationWarning)
            new_args["n_cpus"] = kwargs["no_cpus"]
            kwargs.pop("no_cpus")
        if "multihyps" in kwargs:
            warnings.warn("multihyps has been removed", DeprecationWarning)
            kwargs.pop("multihyps")

        return new_args
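
A minimal usage sketch (not part of the original example; it assumes the helper is exposed as a static method on flare's GaussianProcess class, as in the constructor example further down): deprecated keyword arguments are popped out of kwargs and written into new_args under their new names, while multihyps is simply dropped.

    # Hypothetical call, mirroring how the constructor uses this helper.
    old_kwargs = {"nsample": 50, "par": True, "no_cpus": 4, "multihyps": False}
    new_args = {}
    GaussianProcess.backward_arguments(old_kwargs, new_args)
    # old_kwargs is now empty; new_args holds the renamed keywords:
    # {"n_sample": 50, "parallel": True, "n_cpus": 4}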
Example 2
    def update_kernel(
        self,
        kernels: List[str],
        component: str = "mc",
        hyps=None,
        cutoffs: dict = None,
        hyps_mask: dict = None,
    ):
        """
        Reset the kernel callables used by this GP from a list of kernel
        names, optionally updating hyperparameters, cutoffs, and hyps_mask.
        """
        kernel, grad, ek, efk, _, _, _ = str_to_kernel_set(
            kernels, component, hyps_mask)
        self.kernel = kernel
        self.kernel_grad = grad
        self.energy_force_kernel = efk
        self.energy_kernel = ek
        self.kernels = kernel_str_to_array(kernel.__name__)

        if hyps_mask is not None:
            self.hyps_mask = hyps_mask
        # Cutoffs argument will override hyps mask's cutoffs key, if present
        if isinstance(hyps_mask, dict) and cutoffs is None:
            cutoffs = hyps_mask.get("cutoffs", None)

        if cutoffs is not None:
            if self.cutoffs != cutoffs:
                self.adjust_cutoffs(cutoffs,
                                    train=False,
                                    new_hyps_mask=hyps_mask)
            self.cutoffs = cutoffs

        if isinstance(hyps_mask, dict) and hyps is None:
            hyps = hyps_mask.get("hyps", None)

        if hyps is not None:
            self.hyps = hyps
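
A hedged usage sketch (assuming gp is an existing flare GaussianProcess instance and that cutoffs are keyed by kernel name, as in the constructor example below): switching a model to a two-plus-three-body kernel could look like this.

    # Sketch only: gp is assumed to be an already-constructed GaussianProcess.
    gp.update_kernel(
        kernels=["twobody", "threebody"],
        component="mc",
        cutoffs={"twobody": 7.0, "threebody": 3.5},
    )
    # str_to_kernel_set rebuilds the kernel callables, and adjust_cutoffs is
    # triggered only if the new cutoffs differ from gp.cutoffs.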
Example 3
    def __init__(
        self,
        kernels: List[str] = None,
        component: str = "mc",
        hyps: "ndarray" = None,
        cutoffs: dict = None,
        hyps_mask: dict = None,
        hyp_labels: List = None,
        opt_algorithm: str = "L-BFGS-B",
        maxiter: int = 10,
        parallel: bool = False,
        per_atom_par: bool = True,
        n_cpus: int = 1,
        n_sample: int = 100,
        output: Output = None,
        name="default_gp",
        energy_noise: float = 0.01,
        **kwargs,
    ):
        """Initialize GP parameters and training data."""

        # load arguments into attributes
        self.name = name

        self.output = output
        self.opt_algorithm = opt_algorithm

        self.per_atom_par = per_atom_par
        self.maxiter = maxiter

        # set up parallelization
        self.n_cpus = n_cpus
        self.n_sample = n_sample
        self.parallel = parallel

        self.component = component
        self.kernels = (["twobody", "threebody"] if kernels is None else
                        kernel_str_to_array("".join(kernels)))
        self.cutoffs = {} if cutoffs is None else cutoffs
        self.hyp_labels = hyp_labels
        self.hyps_mask = {} if hyps_mask is None else hyps_mask
        self.hyps = hyps

        GaussianProcess.backward_arguments(kwargs, self.__dict__)
        GaussianProcess.backward_attributes(self.__dict__)

        # ------------  "computed" attributes ------------

        if self.output is None:
            self.logger_name = self.name + "GaussianProcess"
            set_logger(self.logger_name,
                       stream=True,
                       fileout_name=None,
                       verbose="info")
        else:
            self.logger_name = self.output.basename + "log"

        if self.hyps is None:
            # If no hyperparameters are passed in, assume 2 hyps for each
            # kernel, plus one noise hyperparameter, and use a guess value
            self.hyps = np.array([0.1] * (1 + 2 * len(self.kernels)))
        else:
            self.hyps = np.array(self.hyps, dtype=np.float64)

        kernel, grad, ek, efk, efs_e, efs_f, efs_self = str_to_kernel_set(
            self.kernels, self.component, self.hyps_mask)
        self.kernel = kernel
        self.kernel_grad = grad
        self.energy_force_kernel = efk
        self.energy_kernel = ek
        self.efs_energy_kernel = efs_e
        self.efs_force_kernel = efs_f
        self.efs_self_kernel = efs_self
        self.kernels = kernel_str_to_array(kernel.__name__)

        # parallelization
        if self.parallel:
            if self.n_cpus is None:
                self.n_cpus = mp.cpu_count()
            else:
                self.n_cpus = n_cpus
        else:
            self.n_cpus = 1

        self.training_data = []  # Atomic environments
        self.training_labels = []  # Forces acting on central atoms
        self.training_labels_np = np.empty(0, )
        self.n_envs_prev = len(self.training_data)

        # Attributes to accommodate energy labels:
        self.training_structures = []  # Environments of each structure
        self.energy_labels = []  # Energies of training structures
        self.energy_labels_np = np.empty(0, )
        self.energy_noise = energy_noise
        self.all_labels = np.empty(0, )

        # Parameters set during training
        self.ky_mat = None
        self.force_block = None
        self.energy_block = None
        self.force_energy_block = None
        self.l_mat = None
        self.l_mat_inv = None
        self.alpha = None
        self.ky_mat_inv = None
        self.likelihood = None
        self.likelihood_gradient = None
        self.bounds = None

        # File used for reading / writing model if model is large
        self.ky_mat_file = None
        # Flag if too-big warning has been printed for this model
        self.large_warning = False

        if self.logger_name is None:
            if self.output is None:
                self.logger_name = self.name + "GaussianProcess"
                set_logger(self.logger_name,
                           stream=True,
                           fileout_name=None,
                           verbose="info")
            else:
                self.logger_name = self.output.basename + "log"
        logger = logging.getLogger(self.logger_name)

        if self.cutoffs == {}:
            # If no cutoffs are passed in, assume 7 A for the two-body kernel
            # and 3.5 A for the three-body kernel.
            cutoffs = {}
            if "twobody" in self.kernels:
                cutoffs["twobody"] = 7
            if "threebody" in self.kernels:
                cutoffs["threebody"] = 3.5
            if "manybody" in self.kernels:
                raise ValueError("No cutoff was set for the manybody kernel. "
                                 "A default value will not be assigned "
                                 "automatically.")

            self.cutoffs = cutoffs
            logger.warning("No cutoffs were set for your GP. Default values "
                           "have been assigned, but you should think "
                           "carefully about which cutoffs are appropriate "
                           "for your use case.")

        self.check_instantiation()
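
A hedged construction sketch (the import path flare.gp is an assumption and may differ between FLARE versions): with no arguments the constructor falls back to the defaults visible above, i.e. twobody plus threebody kernels, a guessed hyperparameter array of 0.1 values (two per kernel plus one noise term), and cutoffs of 7 and 3.5; explicit arguments override those guesses.

    from flare.gp import GaussianProcess  # import path is an assumption

    # All defaults: kernels=["twobody", "threebody"], hyps=[0.1]*5,
    # cutoffs={"twobody": 7, "threebody": 3.5} (a warning is logged).
    gp_default = GaussianProcess()

    # Explicit arguments override the guessed values.
    gp_custom = GaussianProcess(
        kernels=["twobody", "threebody"],
        hyps=[0.5, 1.0, 0.05, 1.0, 0.05],
        cutoffs={"twobody": 7.0, "threebody": 4.0},
        parallel=True,
        n_cpus=2,
    )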