import numpy as np from ase.cluster.icosahedron import Icosahedron from ase.optimize import BFGS import torch import os import copy # Set up ensemble parallelization if __name__ == "__main__": # import make_ensemble and dask for setting parallelization from al_mlp.ml_potentials.amptorch_ensemble_calc import AmptorchEnsembleCalc from dask.distributed import Client, LocalCluster cluster = LocalCluster(processes=True, threads_per_worker=1) client = Client(cluster) AmptorchEnsembleCalc.set_executor(client) # Set up parent calculator and image environment initial_structure = Icosahedron("Cu", 2) initial_structure.rattle(0.1) initial_structure.set_pbc(True) initial_structure.set_cell([20, 20, 20]) images = [] elements = ["Cu"] parent_calc = EMT() # Run relaxation with active learning OAL_initial_structure = initial_structure.copy() OAL_initial_structure.set_calculator(copy.deepcopy(parent_calc)) OAL_relaxation = Relaxation(OAL_initial_structure, BFGS,
def run_delta_al(atomistic_method, images, elements, dbname, parent_calc):
    """Run an active-learning relaxation using a delta learner.

    Builds an amptorch ensemble ML potential, combines it with a Morse
    "base" calculator and the ground-truth ``parent_calc`` inside a
    ``DeltaLearner``, then drives ``atomistic_method`` with the resulting
    delta calculator.

    Args:
        atomistic_method: object exposing ``run(calc, filename=..., replay_traj=...)``.
        images: training images fed to the trainer and base calculator.
        elements: chemical elements present in the dataset.
        dbname: filename passed to ``atomistic_method.run``.
        parent_calc: the reference (ground-truth) calculator.

    Returns:
        (deltacalc, atomistic_method) after the run completes.
    """
    # Gaussian symmetry-function fingerprint hyperparameters (G2 + G4).
    fingerprint_params = {
        "default": {
            "G2": {
                "etas": np.logspace(np.log10(0.05), np.log10(5.0), num=4),
                "rs_s": [0],
            },
            "G4": {"etas": [0.005], "zetas": [1.0, 4.0], "gammas": [1.0, -1.0]},
            "cutoff": 6,
        },
    }

    # Active-learning loop settings consumed by DeltaLearner.
    learner_params = {
        "max_iterations": 10,
        "samples_to_retrain": 1,
        "filename": "relax_example",
        "file_dir": "./",
        "stat_uncertain_tol": 0.15,
        "dyn_uncertain_tol": 1.5,
        "fmax_verify_threshold": 0.05,  # eV/AA
        "relative_variance": True,
        "n_ensembles": 5,
        "use_dask": True,
    }

    # amptorch AtomsTrainer configuration.
    trainer_config = {
        "model": {"get_forces": True, "num_layers": 3, "num_nodes": 5},
        "optim": {
            "device": "cpu",
            "force_coefficient": 4.0,
            "lr": 1e-2,
            "batch_size": 10,
            "epochs": 100,
            "optimizer": torch.optim.LBFGS,
            "optimizer_args": {"optimizer__line_search_fn": "strong_wolfe"},
        },
        "dataset": {
            "raw_data": images,
            "val_split": 0,
            "elements": elements,
            "fp_params": fingerprint_params,
            "save_fps": False,
            "scaling": {"type": "standardize"},
        },
        "cmd": {
            "debug": False,
            "run_dir": "./",
            "seed": 1,
            "identifier": "test",
            "verbose": False,
            # "logger": True,
            "single-threaded": True,
        },
    }

    trainer = AtomsTrainer(trainer_config)

    # The Morse potential serves as the cheap base model whose residual
    # the ML ensemble learns (delta-learning scheme).
    base_calc = MultiMorse(
        images=images, cutoff=fingerprint_params["default"]["cutoff"]
    )
    ml_potential = AmptorchEnsembleCalc(trainer, learner_params["n_ensembles"])

    deltacalc = DeltaLearner(
        learner_params,
        images,
        ml_potential,
        parent_calc,
        base_calc,
    )

    # Start from a clean DFT-call database for this run.
    if os.path.exists("dft_calls.db"):
        os.remove("dft_calls.db")

    atomistic_method.run(deltacalc, filename=dbname, replay_traj=True)

    return deltacalc, atomistic_method
def run_offline_al(atomistic_method, images, dbname, parent_calc):
    """Run an offline active-learning loop and return its products.

    Trains an amptorch ensemble on ``images`` via a
    ``RestrictedUncertaintyLearner`` (with a mean-combined Morse base
    calculator), then reads back the final ML relaxation trajectory from
    the last iteration's ``.traj`` file.

    Args:
        atomistic_method: stored in ``learner_params`` for the learner to drive.
        images: seed training images; element list is derived from the first one.
        dbname: kept for interface compatibility (the final ``run`` call is
            commented out in this version).
        parent_calc: the reference (ground-truth) calculator.

    Returns:
        (learner, trained_calc, final_ml_traj).
    """
    # Fingerprint hyperparameters: four G2 eta/rs pairs plus G4 terms.
    fingerprint_params = {
        "default": {
            "G2": {
                "etas": np.logspace(np.log10(0.05), np.log10(5.0), num=4),
                "rs_s": [0] * 4,
            },
            "G4": {"etas": [0.005], "zetas": [1.0, 4.0], "gammas": [1.0, -1.0]},
            "cutoff": 6,
        },
    }

    # Element list is inferred from the first image rather than passed in.
    elements = np.unique(images[0].get_chemical_symbols())

    # Offline learner configuration.
    learner_params = {
        "atomistic_method": atomistic_method,
        "max_iterations": 10,
        "force_tolerance": 0.01,
        "samples_to_retrain": 1,
        "filename": "relax_example",
        "file_dir": "./",
        "query_method": "random",
        "use_dask": False,
        "max_evA": 0.05,
        "n_ensembles": 3,
    }

    # amptorch AtomsTrainer configuration.
    trainer_config = {
        "model": {
            "get_forces": True,
            "num_layers": 3,
            "num_nodes": 20,
        },
        "optim": {
            "device": "cpu",
            "force_coefficient": 30,
            "lr": 1,
            "batch_size": 10,
            "epochs": 100,  # was 100
            "loss": "mse",
            "metric": "mae",
            "optimizer": torch.optim.LBFGS,
            "optimizer_args": {"optimizer__line_search_fn": "strong_wolfe"},
        },
        "dataset": {
            "raw_data": images,
            "val_split": 0,
            "elements": elements,
            "fp_params": fingerprint_params,
            "save_fps": False,
            "scaling": {"type": "normalize", "range": (-1, 1)},
        },
        "cmd": {
            "debug": False,
            "run_dir": "./",
            "seed": 1,
            "identifier": "test",
            "verbose": True,
            # "logger": True,
            "single-threaded": True,
        },
    }

    trainer = AtomsTrainer(trainer_config)

    # Mean-combined Morse potential as the base calculator.
    cutoff = fingerprint_params["default"]["cutoff"]
    base_calc = MultiMorse(images, cutoff, combo="mean")
    ml_potential = AmptorchEnsembleCalc(trainer, learner_params["n_ensembles"])

    learner = RestrictedUncertaintyLearner(
        learner_params,
        images,
        ml_potential,
        parent_calc,
        base_calc,
    )
    learner.learn()
    trained_calc = learner.trained_calc

    # Trajectory files are 0-indexed by iteration; the last one completed
    # is iterations - 1.
    final_iteration = learner.iterations - 1
    traj_prefix = learner_params["file_dir"] + learner_params["filename"]
    final_ml_traj = ase.io.read(
        "{}_iter_{}.traj".format(traj_prefix, final_iteration), ":"
    )

    # Remove the DFT-call database left behind by the run.
    if os.path.exists("dft_calls.db"):
        os.remove("dft_calls.db")

    # atomistic_method.run(learner, filename=dbname)

    return learner, trained_calc, final_ml_traj
"""Assemble the al_mlp CuNP unittest suite with a dask-backed ensemble calc."""
import unittest  # FIX: TestLoader/TestSuite were used below without this import

from al_mlp.tests.cases.online_CuNP_test import online_CuNP
from al_mlp.tests.cases.offline_CuNP_test import offline_CuNP
from al_mlp.tests.cases.online_flare_CuNP_test import online_flare_CuNP
from al_mlp.tests.cases.delta_CuNP_test import delta_CuNP
from al_mlp.tests.cases.offline_uncertainty_CuNP_test import offline_uncertainty_CuNP
from al_mlp.tests.cases.online_ft_CuNP_test import online_ft_CuNP

# import make_ensemble and dask for setting parallelization
from al_mlp.ml_potentials.amptorch_ensemble_calc import AmptorchEnsembleCalc
from dask.distributed import Client, LocalCluster

# Set dask client in ensemble calc
if __name__ == "__main__":
    # One single-threaded worker process per core; the ensemble calc fans
    # its members out over this client.
    cluster = LocalCluster(processes=True, threads_per_worker=1)
    client = Client(cluster)
    AmptorchEnsembleCalc.set_executor(client)

    # initialize the test suite
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()

    # add tests to the test suite
    suite.addTests(loader.loadTestsFromTestCase(offline_CuNP))
    suite.addTests(loader.loadTestsFromTestCase(online_CuNP))
    # FIX: offline_NEB was referenced here but never imported, which raised a
    # NameError at collection time; disabled until its module is imported.
    # suite.addTests(loader.loadTestsFromTestCase(offline_NEB))
    # FIX: online_flare_CuNP is imported as a TestCase class like its
    # siblings, so load it with loadTestsFromTestCase, not loadTestsFromModule.
    suite.addTests(loader.loadTestsFromTestCase(online_flare_CuNP))
    suite.addTests(loader.loadTestsFromTestCase(delta_CuNP))
    suite.addTests(loader.loadTestsFromTestCase(offline_uncertainty_CuNP))
    suite.addTests(loader.loadTestsFromTestCase(online_ft_CuNP))
    # suite.addTests(loader.loadTestsFromTestCase(online_PtNP))
    # add more tests here
"cmd": { "debug": False, "run_dir": "./", "seed": 1, "identifier": "test", "verbose": True, # "logger": True, "single-threaded": True, }, } trainer = AtomsTrainer(config) # building base morse calculator as base calculator cutoff = Gs["default"]["cutoff"] base_calc = MultiMorse(images, cutoff, combo="mean") ml_potential = AmptorchEnsembleCalc(trainer, learner_params["n_ensembles"]) learner = EnsembleLearner( learner_params, images, trainer, parent_calc, base_calc, ml_potential, ) learner.learn() # Calculate true relaxation al_iterations = learner.iterations - 1 file_path = learner_params["file_dir"] + learner_params["filename"] true_relax = Relaxation(slab, BFGS, fmax=0.01)