# Load per-feature and per-energy scaling parameters from the validation
# CSV files.  get_scalar_csv is a project helper; its exact return shape is
# not visible here — presumably a tuple of numpy arrays, since each result
# is unpacked with * below (TODO confirm).
gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])

# Convert the scaling arrays to torch tensors of the configured dtype.
gscalar, escalar = list(ct_nn.np2torch(*gscalar, dtype=dtype)), list(
    ct_nn.np2torch(*escalar, dtype=dtype))

# Network layout: M2 two-body features plus M3**3 three-body features in,
# a single energy value out, hidden layers taken from the settings.
M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
Rc = settings['Router']  # outer cutoff radius

# optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

# Stream the first structure (positions, lattice, atom types, forces,
# energies) from the configured input trajectory file.
filename = settings['input_file']
pwmat_mmt = ct_io.stream_structures(filename,
                                    format=settings['input_format'],
                                    get_forces=True)
Rcart, lattice, atom_types, Fi, Ei = next(pwmat_mmt)

# Shift energies by a constant; 272 looks like a dataset-specific
# reference energy — TODO confirm its origin.
Ei = Ei - 272
start = time.time()
Rcart, lattice, Fi, Ei = ct_nn.np2torch(Rcart, lattice, Fi, Ei, dtype=dtype)
# Gradients w.r.t. Cartesian coordinates are required downstream
# (presumably for force computation — not visible in this fragment).
Rcart.requires_grad = True

# Build the feature matrix G from coordinates and lattice, using the
# M2/M3 basis sizes and the inner/outer radial cutoffs.
G = ct_nn.R2G(Rcart,
              lattice,
              M2=M2,
              M3=M3,
              Rinner=settings["Rinner"],
              Router=settings['Router'])
# ---- Example 2 (scrape artifact: original separator text "Exemplo n.º 2" / vote count "0") ----
from chemtorch.parameters import settings

# Override the default outer cutoff radius.
settings['Router'] = 6.2

# One weight/bias pair per hidden layer plus the output layer.
weights, biases = load_weights_biases(len(settings['hidden_layers'])+1)

M2 = settings['M2']
M3 = settings['M3']
Rinner = settings['Rinner']
Router = settings['Router']
# Radii of the outermost basis-function shells for the two-body (M2)
# and three-body (M3) bases: one grid step inside the outer cutoff.
R2 = Router - (Router - Rinner) / M2
R3 = Router - (Router - Rinner) / M3
dcut = settings['Router']  # cutoff distance, same value as Router


# Read the first structure (with forces) from the input trajectory.
pwmat_mmt = stream_structures(settings['input_file'], format=settings['input_format'], get_forces=True)
Rcart, lattice, atom_types, F, Ei = next(pwmat_mmt)
nAtoms = len(Rcart)

# Feature and energy scaling parameters from the validation CSVs
# (project helper; exact return structure not visible here).
gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])

# Resolve the dtype object from its dotted-path string (presumably
# pydoc.locate — the import is outside this fragment, TODO confirm).
dtype, device = locate(settings['dtype']), settings['device']

# Network layout: M2 + M3**3 input features, one energy output.
M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3 ** 3, 1
mlp = [num_feat, *settings['hidden_layers'], num_engy]
Rc = settings['Router']

# initialize weights and biases if they are not provided
# NOTE(review): this fragment is truncated by the scrape.  The stray
# "dtype=dtype)" below is the tail of a np2torch(...) call whose opening
# lines are missing, and the indented statements that follow are the body
# of a training loop whose "for" header is not visible here.
weights = list(np2torch(*weights))
                                     dtype=dtype)
        # Apply the linear energy scaling (engy_a, engy_b) and convert
        # the reference energies to a torch column vector.
        engy_scaled = ct_nn.np2torch(engy_a * engy.values.reshape(
            (-1, 1)) + engy_b,
                                     dtype=dtype)

        # Forward pass, squared-error loss against the scaled energies,
        # then one optimizer step via the project helper.
        nn_out = ct_nn.mlnn(feat_scaled, weights, biases, activation="sigmoid")
        loss = torch.sum((nn_out - engy_scaled)**2)
        ct_nn.mlnn_optimize(loss, optimizer)

    # Invert the energy scaling to recover predictions in physical units,
    # then report the RMSE against the unscaled reference energies.
    Ep = (nn_out - torch_engy_b) / torch_engy_a
    rmse = torch.sqrt(
        torch.mean((Ep - ct_nn.np2torch(engy.values.reshape((-1, 1))))**2))
    print(iter, ct_nn.torch2np(rmse), time.time() - start)
import os

# Path to the bundled training trajectory.  os.path.join emits the right
# separator on every OS, replacing the manual sys.platform check — whose
# Windows branch also embedded invalid "\d" and "\M" escape sequences in
# a non-raw string (a SyntaxWarning on Python 3.12+, a future error).
filename = os.path.join("tests", "data", "MOVEMENT.train")
pwmat_mmt = ct_io.stream_structures(filename, format="pwmat", get_forces=True)
# Iterate over all structures in the trajectory and evaluate the network.
# NOTE(review): fragment is truncated — the ct_nn.d_mlnn(...) call at the
# end is cut off mid-argument-list by the scrape.
for step, (Rcart, lattice, atom_types, F, Ei) in enumerate(pwmat_mmt):
    # Features g plus derivative terms w.r.t. atomic positions (project
    # helper); idxNb presumably indexes neighbor atoms — TODO confirm.
    g, g_dldl, g_dpdl, idxNb = ct_ft.get_dG_dR(Rcart,
                                               lattice,
                                               basis='cosine',
                                               M2=M2,
                                               M3=M3,
                                               Rinner=0,
                                               Router=Rc)
    # Linear feature scaling with the precomputed (feat_a, feat_b).
    feat_scaled = feat_a * g + feat_b
    g_dldl, g_dpdl, torch_feat_a, feat_scaled = ct_nn.np2torch(g_dldl,
                                                               g_dpdl,
                                                               feat_a,
                                                               feat_scaled,
                                                               dtype=dtype)
    # Forward pass returning both the network output and its derivative
    # (call truncated here in the source).
    nn_out2, d_nn_out2 = ct_nn.d_mlnn(feat_scaled,