Example #1

from kliff.calculators import CalculatorTorch
from kliff.dataset import Dataset
from kliff.descriptors import SymmetryFunction
from kliff.models import LinearRegression
from kliff.utils import download_dataset

# descriptor to featurize atomic environments
descriptor = SymmetryFunction(cut_name="cos",
                              cut_dists={"Si-Si": 5.0},
                              hyperparams="set30",
                              normalize=True)

model = LinearRegression(descriptor)

# training set
dataset_path = download_dataset(dataset_name="Si_training_set")
dataset_path = dataset_path.joinpath("varying_alat")
tset = Dataset(dataset_path)
configs = tset.get_configs()
print("Number of configurations:", len(configs))

# calculator; ``create()`` computes the descriptor fingerprints for all configurations
calc = CalculatorTorch(model)
calc.create(configs, reuse=False)

##########################################################################################
# We can train a linear regression model by minimizing a loss function as discussed in
# :ref:`tut_nn`. But a linear regression model has an analytic solution, so we can train
# the model directly by taking advantage of this fact. This is achieved by calling the
# ``fit()`` function of its calculator.
#

# fit the model
calc.fit()
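
##########################################################################################
# For intuition, the "analytic solution" above is ordinary least squares: for a feature
# matrix ``X`` (descriptor values) and targets ``y``, the optimal weights solve the
# normal equations. A minimal NumPy sketch (illustrative only; the shapes and data here
# are made up, and KLIFF's ``fit()`` handles all of this internally):

import numpy as np

X = np.random.rand(100, 30)  # 100 training samples, 30 descriptor features
y = np.random.rand(100)  # reference values (e.g., energies)
w, *_ = np.linalg.lstsq(X, y, rcond=None)  # least-squares weights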

# save model
model.save("linear_model.pkl")
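
##########################################################################################
# A possible follow-up (assumed workflow, not part of the original example): the saved
# model can later be reloaded for evaluation, mirroring the ``load()`` call shown in
# Example #2.

model.load(path="linear_model.pkl", mode="eval")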
Example #2

from kliff import nn
from kliff.calculators import CalculatorTorch
from kliff.dataset import Dataset
from kliff.descriptors import SymmetryFunction
from kliff.loss import Loss
from kliff.models import NeuralNetwork
from kliff.utils import download_dataset
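
##########################################################################################
# Before loading the saved parameters, the model must be rebuilt with the same
# architecture it had when ``final_model.pkl`` was written. The descriptor settings and
# layer sizes below follow the network of :ref:`tut_nn` and are an assumption; adjust
# them if your saved model differs.

descriptor = SymmetryFunction(cut_name="cos",
                              cut_dists={"Si-Si": 5.0},
                              hyperparams="set51",
                              normalize=True)

N1 = 10
N2 = 10
model = NeuralNetwork(descriptor)
model.add_layers(
    # first hidden layer
    nn.Linear(descriptor.get_size(), N1),
    nn.Tanh(),
    # second hidden layer
    nn.Linear(N1, N2),
    nn.Tanh(),
    # output layer
    nn.Linear(N2, 1),
)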
##########################################################################################
# Load the parameters from the saved model.
# If we load a model to continue training (the case here), ``mode`` needs to be set to
# ``train``; if we load the model for evaluation, it should be set to ``eval``. For
# fully-connected layers this actually does not matter, but for dropout and batch norm
# layers the two modes behave differently.
model.load(path="final_model.pkl", mode="train")
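
##########################################################################################
# A small illustration (plain PyTorch, independent of KLIFF) of why the mode matters for
# dropout layers: in ``train`` mode dropout randomly zeroes activations (rescaling the
# rest by 1/(1-p)), while in ``eval`` mode it passes the input through unchanged.

import torch

drop = torch.nn.Dropout(p=0.5)
x = torch.ones(5)
drop.train()  # training mode: roughly half the entries become 0, the rest become 2
print(drop(x))
drop.eval()  # evaluation mode: dropout is a no-op
print(drop(x))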

# training set
dataset_path = download_dataset(dataset_name="Si_training_set")
dataset_path = dataset_path.joinpath("varying_alat")
tset = Dataset(dataset_path)
configs = tset.get_configs()

# calculator; ``reuse=True`` reuses the fingerprints computed in the previous training
calc = CalculatorTorch(model)
calc.create(configs, reuse=True)

# loss; ``forces_weight`` scales the force residuals relative to the energy residuals
loss = Loss(calc, residual_data={"forces_weight": 0.3})

##########################################################################################
# Load the state dictionary of the optimizer so that it resumes from where the previous
# training ended. We also set ``start_epoch`` to ``10`` so that the epoch number
# continues from the last training.

loss.load_optimizer_stat("optimizer_stat.pkl")
result = loss.minimize(method="Adam",
                       num_epochs=10,
                       start_epoch=10,
                       batch_size=100)
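
##########################################################################################
# A possible final step (assumed, not part of the original snippet): save the further
# trained model, and the optimizer state in case training needs to be resumed again.
# ``save_optimizer_stat`` is assumed to be the counterpart of the ``load_optimizer_stat``
# call above.

model.save("final_model.pkl")
loss.save_optimizer_stat("optimizer_stat.pkl")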