Example #1
0
# Stream structures (positions, lattice, species, forces, per-atom energies)
# from the configured input file and pull the first one.
pwmat_mmt = stream_structures(settings['input_file'],
                              format=settings['input_format'],
                              get_forces=True)
Rcart, lattice, atom_types, F, Ei = next(pwmat_mmt)
nAtoms = len(Rcart)

# Scaling pairs for features and energies — presumably (scale, shift)
# loaded from the validation CSVs; confirm against get_scalar_csv.
gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])

# Resolve the torch dtype from its dotted name; device string from settings.
dtype = locate(settings['dtype'])
device = settings['device']

# Network layout: M2 two-body + M3**3 three-body features in, 1 energy out.
M2 = settings['M2']
M3 = settings['M3']
num_feat = M2 + M3 ** 3
num_engy = 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
Rc = settings['Router']

# Initialize weights and biases if they are not provided; otherwise convert
# the supplied numpy arrays to torch tensors.
# BUG FIX: the original unconditionally unpacked `weights`/`biases`, which
# contradicts this comment and raises when they were not provided; use the
# guarded pattern employed elsewhere in this file.
# NOTE(review): get_weights/get_biases are not imported in this fragment —
# confirm they are in scope (chemtorch.nn provides them elsewhere here).
weights = get_weights(mlp) if weights is None else list(np2torch(*weights))
biases = get_biases(mlp) if biases is None else list(np2torch(*biases))
# Enable gradient tracking on every trainable tensor.
for (w, b) in zip(weights, biases):
    w.requires_grad, b.requires_grad = True, True

# Convert the feature (gscalar) and energy (escalar) scaling pairs to
# torch tensors of the configured dtype.
gscalar, escalar = list(np2torch(*gscalar, dtype=dtype)), list(np2torch(*escalar, dtype=dtype))


# Time the feature construction for this structure.
start = time.time()
# Descriptor values plus their derivatives w.r.t. atomic positions.
# NOTE(review): idxNb is presumably the neighbor-index map — confirm.
g, g_dldl, g_dpdl, idxNb = get_dG_dR(Rcart, lattice,
                                     basis='cosine', M2=M2, M3=M3, Rinner=0, Router=Rc)
g, g_dldl, g_dpdl = np2torch(g, g_dldl, g_dpdl, dtype=dtype)
# Linear feature scaling: gscalar[0] is the scale, gscalar[1] the shift.
feat_scaled = gscalar[0] * g + gscalar[1]

# Forward pass returning network outputs and their input derivatives.
nn_out2, d_nn_out2 = d_mlnn(feat_scaled, weights, biases, activation="sigmoid")
# NOTE(review): npPhi3_ijk, npDijk, cf, Rinner and R3 are not defined in this
# fragment — this section appears spliced from another snippet; verify scope.
npPhi3_ijk[npDijk > 0] = cf.piecewise_cosine(npDijk[npDijk > 0],
                                             M3,
                                             start=Rinner,
                                             stop=R3)

# Contract pairwise (ij) and triplet (ijk) basis values into per-atom
# three-body features; after the transpose z3a has shape
# (nAtoms, npMaxNb, M3, M3).
z3a = np.matmul(npPhi3_ij.transpose([0, 2, 1]),
                npPhi3_ijk.reshape((nAtoms, npMaxNb, -1)))
z3a = z3a.reshape((nAtoms, M3, npMaxNb, M3)).transpose([0, 2, 1, 3])
# Two-body features: sum the pairwise basis values over neighbors.
g2 = npPhi2.sum(axis=1)
g3 = np.matmul(npPhi3_ij.transpose([0, 2, 1]),
               z3a.reshape((nAtoms, npMaxNb, -1))).reshape((nAtoms, -1))
# Final per-atom feature matrix: two-body columns then three-body columns.
g = np.concatenate([g2, g3], axis=1)

# ===============================================================

# Torch-based recomputation of the descriptors with autograd enabled on the
# Cartesian coordinates, so forces can later be obtained by backprop.
Rcart, lattice = np2torch(Rcart, lattice)
Rcart.requires_grad = True

# Neighbor list and pair vectors within the outer cutoff, then the derived
# pair (dij) and triplet (dijk) distances and unit vectors (Rhat).
nblist, Rij = get_nb(Rcart, lattice, settings['Router'])
dij, dijk, Rhat = get_distances(Rij)

# Basis-value buffers: one M2/M3-sized row per pair (dij) / triplet (dijk).
phi2b = torch.zeros(*dij.shape, settings['M2'])
phi3b_ij = torch.zeros(*dij.shape, settings['M3'])
phi3b_ijk = torch.zeros(*dijk.shape, settings['M3'])

# Evaluate the piecewise-cosine basis only where a neighbor exists (dij > 0).
# NOTE(review): R2 is not defined in this fragment — presumably the two-body
# cutoff computed elsewhere as Router - (Router - Rinner) / M2; confirm.
phi2b[dij > 0] = pcosine(dij[dij > 0],
                         settings['M2'],
                         start=settings['Rinner'],
                         stop=R2)
# NOTE(review): the statement below is cut off mid-call in this file — the
# remaining arguments (start/stop) are missing from this fragment.
phi3b_ij[dij > 0] = pcosine(dij[dij > 0],
                            settings['M3'],
from pydoc import locate
import chemtorch.nn as ct_nn
import chemtorch.io as ct_io

from chemtorch.io.parameters import load_weights_biases

from torch.autograd import grad
import time
import torch

settings["Router"] = 6.2

dtype, device = locate(settings['dtype']), settings['device']

weights, biases = load_weights_biases(len(settings['hidden_layers']) + 1)
weights = list(ct_nn.np2torch(*weights))
biases = list(ct_nn.np2torch(*biases))
# for (w, b) in zip(weights, biases): w.requires_grad, b.requires_grad = True, True

gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])

gscalar, escalar = list(ct_nn.np2torch(*gscalar, dtype=dtype)), list(
    ct_nn.np2torch(*escalar, dtype=dtype))

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
Rc = settings['Router']

# optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])
from chemtorch.parameters import settings

# Resolve torch dtype and device from settings.
dtype, device = locate(settings['dtype']), settings['device']

# Feature sizes and network layout.
M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
Rc = settings['Router']
# Freshly initialized parameters and an Adam optimizer over all of them.
weights, biases = ct_nn.get_weights(mlp), ct_nn.get_biases(mlp)
optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

# Per-column scale (a) and shift (b) for features and energies.
feat_a, feat_b = ct_ml.get_scalar_csv(settings["valid_feat_file"])
engy_a, engy_b = ct_ml.get_scalar_csv(settings["valid_engy_file"])

feat_a, feat_b = ct_nn.np2torch(feat_a, feat_b, dtype=dtype)
engy_a, engy_b = ct_nn.np2torch(engy_a, engy_b, dtype=dtype)

# Training loop: re-open and stream the CSV files in chunks every epoch.
for i_epoch in range(settings['epoch']):
    start = time.time()
    # featFile = "tests\\data\\feat" if platform == 'win32' else "tests/data/feat"
    # engyFile = "tests\\data\\engy" if platform == 'win32' else "tests/data/engy"
    feat_chunk = pd.read_csv(settings['train_feat_file'],
                             header=None,
                             chunksize=settings['chunk_size'])
    engy_chunk = pd.read_csv(settings['train_engy_file'],
                             header=None,
                             chunksize=settings['chunk_size'])
    for step, (feat, engy) in enumerate(zip(feat_chunk, engy_chunk)):

        # NOTE(review): the statement below is cut off mid-call in this file.
        feat_scaled = feat_a * ct_nn.np2torch(feat.values,
Example #5
0
from chemtorch.ml import get_scalar_csv
from chemtorch.io import stream_structures
from chemtorch.features import get_dG_dR

from chemtorch.parameters import settings
# Train for a fixed number of epochs.
settings['epoch'] = 50

# No pre-trained parameters supplied for this run.
weights, biases = None, None

# Resolve the torch dtype from its dotted name; device string from settings.
dtype = locate(settings['dtype'])
device = settings['device']

# Network layout: M2 two-body + M3**3 three-body features in, 1 energy out.
M2 = settings['M2']
M3 = settings['M3']
num_feat = M2 + M3 ** 3
num_engy = 1
mlp = [num_feat, *settings['hidden_layers'], num_engy]

# Fresh parameters when none were supplied; otherwise convert the given
# numpy arrays to torch tensors.
if weights is None:
    weights = get_weights(mlp)
else:
    weights = list(np2torch(*weights))
if biases is None:
    biases = get_biases(mlp)
else:
    biases = list(np2torch(*biases))

# Enable gradient tracking on every trainable tensor.
for w, b in zip(weights, biases):
    w.requires_grad = True
    b.requires_grad = True

# Adam over all trainable tensors.
optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

# Per-column scale (a) and shift (b) for features and energies from the
# validation CSVs, converted to torch tensors of the configured dtype.
feat_a, feat_b = get_scalar_csv(settings["valid_feat_file"])
engy_a, engy_b = get_scalar_csv(settings["valid_engy_file"])

feat_a, feat_b = np2torch(feat_a, feat_b, dtype=dtype)
engy_a, engy_b = np2torch(engy_a, engy_b, dtype=dtype)

# NOTE(review): the epoch loop body is truncated in this file — only the
# per-epoch timer remains.
for i_epoch in range(settings['epoch']):

    start = time.time()
Example #6
0
from pydoc import locate
import chemtorch.nn as ct_nn
import chemtorch.io as ct_io

from torch.autograd import grad
import time
import torch

settings["Router"] = 6.2

dtype, device = locate(settings['dtype']), settings['device']

gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])

gscalar, escalar = list(ct_nn.np2torch(*gscalar, dtype=dtype)), list(
    ct_nn.np2torch(*escalar, dtype=dtype))

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
Rc = settings['Router']

weights, biases = ct_nn.get_weights(mlp), ct_nn.get_biases(mlp)
optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

# NOTE(review): the loop body appears truncated in this file — each epoch
# only re-opens the structure stream here; the training steps are missing.
for iEpoch in range(15):
    filename = settings['input_file']
    pwmat_mmt = ct_io.stream_structures(filename,
                                        format=settings['input_format'],
                                        get_forces=True)
# Validation scaler CSV paths (Windows vs POSIX separators).
vfeatFile = "tests\\data\\vfeat" if platform == 'win32' else "tests/data/vfeat"
vengyFile = "tests\\data\\vengy" if platform == 'win32' else "tests/data/vengy"
# Per-column scale (a) and shift (b) for features and energies.
feat_a, feat_b = ct_ml.get_scalar_csv(vfeatFile)
engy_a, engy_b = ct_ml.get_scalar_csv(vengyFile)
# BUG FIX: was ct_nn.torch2np — but these numpy scalers must be converted TO
# torch tensors (the `torch_` prefix and the torch arithmetic computing Ep and
# rmse below require it), and np2torch is the conversion used with dtype=
# throughout this file.
torch_engy_a, torch_engy_b = ct_nn.np2torch(engy_a, engy_b, dtype=dtype)

# Chunked CSV training data; the OS-dependent paths are loop-invariant, so
# build them once outside the loop. (Also renamed the loop variable from
# `iter`, which shadowed the builtin.)
featFile = "tests\\data\\feat" if platform == 'win32' else "tests/data/feat"
engyFile = "tests\\data\\engy" if platform == 'win32' else "tests/data/engy"

for i_epoch in range(5):
    start = time.time()
    feat_chunk = pd.read_csv(featFile, header=None, chunksize=1000)
    engy_chunk = pd.read_csv(engyFile, header=None, chunksize=1000)
    for step, (feat, engy) in enumerate(zip(feat_chunk, engy_chunk)):

        # Scale features/energies, forward pass, and one optimizer step.
        feat_scaled = ct_nn.np2torch(feat_a * feat.values + feat_b,
                                     dtype=dtype)
        engy_scaled = ct_nn.np2torch(engy_a * engy.values.reshape(
            (-1, 1)) + engy_b,
                                     dtype=dtype)

        nn_out = ct_nn.mlnn(feat_scaled, weights, biases, activation="sigmoid")
        loss = torch.sum((nn_out - engy_scaled)**2)
        ct_nn.mlnn_optimize(loss, optimizer)

    # Un-scale the last chunk's predictions and report its RMSE + epoch time.
    # NOTE(review): np2torch here omits dtype=dtype — confirm its default
    # matches the dtype used above.
    Ep = (nn_out - torch_engy_b) / torch_engy_a
    rmse = torch.sqrt(
        torch.mean((Ep - ct_nn.np2torch(engy.values.reshape((-1, 1))))**2))
    print(i_epoch, ct_nn.torch2np(rmse), time.time() - start)

filename = "tests\data\MOVEMENT.train" if platform == 'win32' else "tests/data/MOVEMENT.train"
pwmat_mmt = ct_io.stream_structures(filename, format="pwmat", get_forces=True)
Example #8
0
# Basis sizes and radial cutoffs for the two- and three-body descriptors.
M2, M3 = settings['M2'], settings['M3']
Rinner, Router = settings['Rinner'], settings['Router']
# Last basis-function stop sits one grid spacing inside the outer cutoff.
span = Router - Rinner
R2 = Router - span / M2
R3 = Router - span / M3
dcut = settings['Router']

# Pull the first structure (positions, lattice, species, forces, energies)
# from the configured input stream.
pwmat_mmt = stream_structures(
    settings['input_file'],
    format=settings['input_format'],
    get_forces=True,
)
Rcart, lattice, atom_types, F, Ei = next(pwmat_mmt)
nAtoms = len(Rcart)

# Enable autograd on the Cartesian coordinates so forces can be obtained by
# backprop later.
Rcart, lattice = np2torch(Rcart, lattice)
Rcart.requires_grad = True

# nAtoms = Rcart.shape[0]
# dim = Rcart.shape[-1]
#

# Neighbor list, pair vectors, and the derived pair (dij) / triplet (dijk)
# distances plus unit vectors (Rhat).
nblist, Rij = get_nb(Rcart, lattice, settings['Router'])
dij, dijk, Rhat = get_distances(Rij)

# Basis-value buffers for two-body (phi2b) and three-body (phi3b_*) terms.
phi2b = torch.zeros(*dij.shape, settings['M2'])
phi3b_ij = torch.zeros(*dij.shape, settings['M3'])
phi3b_ijk = torch.zeros(*dijk.shape, settings['M3'])

# NOTE(review): the statement below is cut off mid-call at the end of this
# file — the remaining arguments (start/stop) are missing.
phi2b[dij > 0] = pcosine(dij[dij > 0],
                         settings['M2'],