def run_pyace_fit(self, bbasisconfig: BBasisConfiguration, dataframe: pd.DataFrame,
                  loss_spec: LossFunctionSpecification, fit_config: Dict) -> BBasisConfiguration:
    parallel_mode = self.backend_config.get(BACKEND_PARALLEL_MODE_KW) or "serial"
    batch_size = len(dataframe)
    log.info("Loss function specification: " + str(loss_spec))
    display_step = self.backend_config.get('display_step', 20)
    self.fitter = PyACEFit(basis=bbasisconfig,
                           loss_spec=loss_spec,
                           executors_kw_args=dict(parallel_mode=parallel_mode,
                                                  batch_size=batch_size,
                                                  n_workers=self.backend_config.get(BACKEND_NWORKERS_KW, None)),
                           seed=42,
                           display_step=display_step)
    maxiter = fit_config.get(FIT_NITER_KW, 100)
    fit_options = fit_config.get(FIT_OPTIONS_KW, {})
    # Default optimizer options; user-supplied fit options take precedence
    options = {"maxiter": maxiter, "disp": True}
    options.update(fit_options)
    self.fitter.fit(structures_dataframe=dataframe,
                    method=fit_config[FIT_OPTIMIZER_KW],
                    options=options,
                    callback=self._callback)
    self.res_opt = self.fitter.res_opt
    # Copy the optimized coefficients back into the caller's configuration
    new_bbasisconf = self.fitter.bbasis_opt.to_BBasisConfiguration()
    bbasisconfig.set_all_coeffs(new_bbasisconf.get_all_coeffs())
    return bbasisconfig
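# A minimal, self-contained sketch (illustration only, not the pyace API) of the
# options-merge and callback pattern used in run_pyace_fit above, assuming
# PyACEFit delegates to a scipy-style minimizer. The toy objective, the "gtol"
# override, and the no-op callback are hypothetical stand-ins.
def _demo_scipy_options_merge():
    import numpy as np
    from scipy.optimize import minimize

    options = {"maxiter": 100, "disp": True}  # library defaults
    options.update({"gtol": 1e-8})            # user-supplied options take precedence
    # The callback is invoked once per iteration with the current parameter
    # vector, analogous to the self._callback hook above.
    return minimize(lambda x: np.sum(x ** 2), x0=np.ones(3),
                    method="BFGS", options=options,
                    callback=lambda xk: None)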
def run_tensorpot_fit(self, bbasisconfig: BBasisConfiguration, dataframe: pd.DataFrame,
                      loss_spec: LossFunctionSpecification, fit_config: Dict) -> BBasisConfiguration:
    from tensorpotential.potentials.ace import ACE
    from tensorpotential.tensorpot import TensorPotential
    from tensorpotential.fit import FitTensorPotential
    from tensorpotential.utils.utilities import batching_data
    from tensorpotential.constants import (LOSS_TYPE, LOSS_FORCE_FACTOR, LOSS_ENERGY_FACTOR,
                                           L1_REG, L2_REG, AUX_LOSS_FACTOR)

    batch_size = self.backend_config.get(BACKEND_BATCH_SIZE_KW, 10)
    log.info("Loss function specification: " + str(loss_spec))
    log.info("Batch size: {}".format(batch_size))
    batches = batching_data(dataframe, batch_size=batch_size)
    # max_bytes = self.adjust_batch_size(dataframe, bbasisconfig, ini_batch_size=batch_size)
    n_batches = len(batches)

    if loss_spec.w1_coeffs != 1.0 or loss_spec.w2_coeffs != 1.0:
        log.warning("'w1_coeffs'={} and 'w2_coeffs'={} in the loss function will be ignored"
                    .format(loss_spec.w1_coeffs, loss_spec.w2_coeffs))

    loss_force_factor = loss_spec.kappa
    # Enable smoothness regularization only if any radial smoothness weight is non-zero
    if (np.array([loss_spec.w0_rad, loss_spec.w1_rad, loss_spec.w2_rad]) != 0).any():
        ace_potential = ACE(bbasisconfig, compute_smoothness=True)
        tensorpotential = TensorPotential(ace_potential, loss_specs={
            LOSS_TYPE: 'per-atom',
            LOSS_FORCE_FACTOR: loss_force_factor,
            LOSS_ENERGY_FACTOR: (1 - loss_force_factor),
            # Regularization weights are applied per batch, hence divided by n_batches
            L1_REG: np.float64(loss_spec.L1_coeffs) / n_batches,
            L2_REG: np.float64(loss_spec.L2_coeffs) / n_batches,
            AUX_LOSS_FACTOR: [np.float64(loss_spec.w0_rad) / n_batches,
                              np.float64(loss_spec.w1_rad) / n_batches,
                              np.float64(loss_spec.w2_rad) / n_batches]})
    else:
        ace_potential = ACE(bbasisconfig, compute_smoothness=False)
        tensorpotential = TensorPotential(ace_potential, loss_specs={
            LOSS_TYPE: 'per-atom',
            LOSS_FORCE_FACTOR: loss_force_factor,
            LOSS_ENERGY_FACTOR: (1 - loss_force_factor),
            L1_REG: np.float64(loss_spec.L1_coeffs) / n_batches,
            L2_REG: np.float64(loss_spec.L2_coeffs) / n_batches})

    display_step = self.backend_config.get('display_step', 20)
    self.fitter = FitTensorPotential(tensorpotential, display_step=display_step)
    fit_options = fit_config.get(FIT_OPTIONS_KW, None)
    self.fitter.fit(dataframe, niter=fit_config[FIT_NITER_KW],
                    optimizer=fit_config[FIT_OPTIMIZER_KW], batch_size=batch_size,
                    jacobian_factor=None, callback=self._callback, options=fit_options)
    self.res_opt = self.fitter.res_opt
    coeffs = self.fitter.get_fitted_coefficients()
    bbasisconfig.set_all_coeffs(coeffs)
    return bbasisconfig
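# A dependency-light sketch of why the L1/L2 and smoothness weights above are
# divided by n_batches: assuming the penalty term is added to every batch loss
# and batch losses accumulate over an epoch, the 1/n_batches scaling keeps the
# total penalty equal to a single full-dataset application of the weight.
# The coefficient values and weights below are hypothetical.
def _demo_per_batch_regularization(l2_reg=1e-4, n_batches=8):
    import numpy as np

    coeffs = np.array([0.5, -1.2, 0.3])
    full_penalty = l2_reg * np.sum(coeffs ** 2)             # single full-data pass
    per_batch = (l2_reg / n_batches) * np.sum(coeffs ** 2)  # added to each batch loss
    assert np.isclose(per_batch * n_batches, full_penalty)
    return full_penalty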
def safely_update_bbasisconfiguration_coefficients(coeffs: np.ndarray, config: BBasisConfiguration) -> None:
    """Update only the first len(coeffs) coefficients of `config` in place,
    leaving any remaining coefficients untouched."""
    current_coeffs = config.get_all_coeffs()
    for i, c in enumerate(coeffs):
        current_coeffs[i] = c
    config.set_all_coeffs(current_coeffs)
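# A dependency-free sketch of the partial-update semantics of
# safely_update_bbasisconfiguration_coefficients: only the first len(coeffs)
# entries are overwritten; trailing coefficients keep their previous values.
# The plain lists stand in for config.get_all_coeffs()/set_all_coeffs().
def _demo_partial_coefficient_update():
    current = [1.0, 2.0, 3.0, 4.0]  # stand-in for config.get_all_coeffs()
    new = [9.0, 8.0]                # optimizer output shorter than the full set
    for i, c in enumerate(new):
        current[i] = c
    assert current == [9.0, 8.0, 3.0, 4.0]
    return current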