Example #1
0
    "cmd": {
        "debug": False,
        "run_dir": "./",
        "seed": 1,
        "identifier": "test",
        "verbose": True,
        "logger": False,
    },
}

trainer = AtomsTrainer(config)

# building base morse calculator as base calculator
cutoff = Gs["default"]["cutoff"]

base_calc = MultiMorse(images, cutoff, combo="mean")

# define learner_params OfflineActiveLearner

learner_params = {
    "atomistic_method":
    Relaxation(initial_geometry=slab.copy(),
               optimizer=BFGS,
               fmax=0.01,
               steps=100),
    "max_iterations":
    10,
    "samples_to_retrain":
    2,
    "filename":
    "example",
Example #2
0
def run_delta_al(atomistic_method, images, elements, dbname, parent_calc):
    """Run a delta-learning active-learning loop.

    Trains an ensemble ML potential on top of a Morse base calculator and
    drives ``atomistic_method`` with the resulting ``DeltaLearner``
    calculator, logging parent-calculator calls to ``dbname``.

    NOTE(review): depends on project classes (AtomsTrainer, MultiMorse,
    AmptorchEnsembleCalc, DeltaLearner) imported elsewhere in this file.
    """
    # Symmetry-function (fingerprint) hyperparameters.
    g2 = {
        "etas": np.logspace(np.log10(0.05), np.log10(5.0), num=4),
        "rs_s": [0],
    }
    g4 = {"etas": [0.005], "zetas": [1.0, 4.0], "gammas": [1.0, -1.0]}
    Gs = {"default": {"G2": g2, "G4": g4, "cutoff": 6}}

    learner_params = {
        "max_iterations": 10,
        "samples_to_retrain": 1,
        "filename": "relax_example",
        "file_dir": "./",
        "stat_uncertain_tol": 0.15,
        "dyn_uncertain_tol": 1.5,
        "fmax_verify_threshold": 0.05,  # eV/AA
        "relative_variance": True,
        "n_ensembles": 5,
        "use_dask": True,
    }

    # Assemble the trainer configuration from per-section dicts.
    model_cfg = {"get_forces": True, "num_layers": 3, "num_nodes": 5}
    optim_cfg = {
        "device": "cpu",
        "force_coefficient": 4.0,
        "lr": 1e-2,
        "batch_size": 10,
        "epochs": 100,
        "optimizer": torch.optim.LBFGS,
        "optimizer_args": {"optimizer__line_search_fn": "strong_wolfe"},
    }
    dataset_cfg = {
        "raw_data": images,
        "val_split": 0,
        "elements": elements,
        "fp_params": Gs,
        "save_fps": False,
        "scaling": {"type": "standardize"},
    }
    cmd_cfg = {
        "debug": False,
        "run_dir": "./",
        "seed": 1,
        "identifier": "test",
        "verbose": False,
        "single-threaded": True,
    }
    config = {
        "model": model_cfg,
        "optim": optim_cfg,
        "dataset": dataset_cfg,
        "cmd": cmd_cfg,
    }

    trainer = AtomsTrainer(config)

    # Morse potential acts as the delta-learning baseline.
    base_calc = MultiMorse(images=images, cutoff=Gs["default"]["cutoff"])

    ml_potential = AmptorchEnsembleCalc(trainer, learner_params["n_ensembles"])
    deltacalc = DeltaLearner(
        learner_params,
        images,
        ml_potential,
        parent_calc,
        base_calc,
    )

    # Start from a clean parent-call database.
    if os.path.exists("dft_calls.db"):
        os.remove("dft_calls.db")
    atomistic_method.run(deltacalc, filename=dbname, replay_traj=True)

    return deltacalc, atomistic_method
def run_offline_al(atomistic_method, images, dbname, parent_calc):
    """Run an offline active-learning loop with ``FmaxLearner``.

    Returns the learner, its trained calculator, and the final ML-relaxed
    trajectory read back from the last iteration's ``.traj`` file.

    NOTE(review): depends on project classes (AtomsTrainer, MultiMorse,
    FmaxLearner) and ``ase`` imported elsewhere in this file.
    """
    # Symmetry-function (fingerprint) hyperparameters.
    g2 = {
        "etas": np.logspace(np.log10(0.05), np.log10(5.0), num=4),
        "rs_s": [0] * 4,
    }
    g4 = {"etas": [0.005], "zetas": [1.0, 4.0], "gammas": [1.0, -1.0]}
    Gs = {"default": {"G2": g2, "G4": g4, "cutoff": 6}}

    # Element list is inferred from the first image.
    elements = np.unique(images[0].get_chemical_symbols())

    learner_params = {
        "atomistic_method": atomistic_method,
        "max_iterations": 10,
        "force_tolerance": 0.01,
        "samples_to_retrain": 2,
        "filename": "relax_example",
        "file_dir": "./",
        "query_method": "random",
        "use_dask": False,
        "max_evA": 0.05,
    }

    config = {
        "model": {"get_forces": True, "num_layers": 3, "num_nodes": 20},
        "optim": {
            "device": "cpu",
            "force_coefficient": 30,
            "lr": 1,
            "batch_size": 10,
            "epochs": 100,
            "loss": "mse",
            "metric": "mae",
            "optimizer": torch.optim.LBFGS,
            "optimizer_args": {"optimizer__line_search_fn": "strong_wolfe"},
        },
        "dataset": {
            "raw_data": images,
            "val_split": 0,
            "elements": elements,
            "fp_params": Gs,
            "save_fps": False,
            "scaling": {"type": "normalize", "range": (-1, 1)},
        },
        "cmd": {
            "debug": False,
            "run_dir": "./",
            "seed": 1,
            "identifier": "test",
            "verbose": True,
            "single-threaded": True,
        },
    }

    trainer = AtomsTrainer(config)

    # Morse potential as the base calculator for the learner.
    base_calc = MultiMorse(images, Gs["default"]["cutoff"], combo="mean")

    learner = FmaxLearner(
        learner_params,
        images,
        trainer,
        parent_calc,
        base_calc,
    )
    learner.learn()

    trained_calc = learner.trained_calc
    last_iteration = learner.iterations - 1
    file_path = learner_params["file_dir"] + learner_params["filename"]
    final_ml_traj = ase.io.read(f"{file_path}_iter_{last_iteration}.traj", ":")

    # Start the next run from a clean parent-call database.
    if os.path.exists("dft_calls.db"):
        os.remove("dft_calls.db")

    return learner, trained_calc, final_ml_traj
Example #4
0
def offline_neb(images, parent_calc, iter=4, intermediate_images=3):
    """Run offline active learning for a nudged-elastic-band calculation.

    Parameters
    ----------
    images : list
        Starting NEB images; also used as initial training data.
    parent_calc
        Reference calculator queried for ground-truth energies/forces.
    iter : int, optional
        Maximum number of active-learning iterations.
        NOTE(review): the name shadows the builtin ``iter``; kept unchanged
        for backward compatibility with keyword callers.
    intermediate_images : int, optional
        Number of interior NEB images, also used as the per-iteration
        retraining sample count.

    Returns
    -------
    NEBLearner
        The learner after ``learn()`` has completed.

    NOTE(review): depends on project names (AtomsTrainer, MultiMorse,
    NEBcalc, NEBLearner, Tanh) imported elsewhere in this file.
    """
    # Keep torch single-threaded (avoids intra-op parallelism here).
    torch.set_num_threads(1)

    # Symmetry-function (fingerprint) hyperparameters.
    Gs = {
        "default": {
            "G2": {
                "etas": np.logspace(np.log10(0.05), np.log10(5.0), num=4),
                "rs_s": [0],
            },
            "G4": {"etas": [0.005], "zetas": [1.0, 4.0], "gammas": [1.0, -1.0]},
            "cutoff": 5.0,
        },
    }

    elements = ["Cu", "C"]
    config = {
        "model": {
            "get_forces": True,
            "num_layers": 3,
            "num_nodes": 20,
            "activation": Tanh,
        },
        "optim": {
            "device": "cpu",
            "force_coefficient": 27,
            "lr": 1e-2,
            "batch_size": 1000,
            "epochs": 300,
            "loss": "mse",
            "metric": "mse",
            "optimizer": torch.optim.LBFGS,
            "optimizer_args": {"optimizer__line_search_fn": "strong_wolfe"},
            # Warm-restart cosine annealing of the learning rate.
            "scheduler": {
                "policy": torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,
                "params": {"T_0": 10, "T_mult": 2},
            },
        },
        "dataset": {
            "raw_data": images,
            "val_split": 0,
            "elements": elements,
            "fp_params": Gs,
            "save_fps": True,
            "scaling": {"type": "normalize", "range": (-1, 1)},
        },
        "cmd": {
            "debug": False,
            "run_dir": "./",
            "seed": 1,
            "identifier": "test",
            "verbose": True,
            "logger": False,
            "dtype": torch.DoubleTensor,
        },
    }

    trainer = AtomsTrainer(config)

    # Morse potential as the base calculator; copy the images so the NEB
    # method and the base calculator do not share the same list object.
    cutoff = Gs["default"]["cutoff"]
    neb_images = images.copy()
    base_calc = MultiMorse(neb_images, cutoff, combo="mean")

    # Learner parameters for the offline active learner driving the NEB.
    learner_params = {
        "atomistic_method": NEBcalc(
            starting_images=neb_images,
            intermediate_samples=intermediate_images,
        ),
        "max_iterations": iter,
        "samples_to_retrain": intermediate_images,
        "filename": "example",
        "file_dir": "./",
        "use_dask": False,
    }

    learner = NEBLearner(learner_params, images, trainer, parent_calc, base_calc)
    learner.learn()
    return learner