Example #1
import time
from sys import platform

import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import MinMaxScaler

import chemtorch.nn as ct_nn
import chemtorch.io as ct_io
import chemtorch.structure as ct_st
import chemtorch.features.basis.piecewise_cosine as ct_ft_cos

engy_scalar = MinMaxScaler(feature_range=(0, 1))
feat_scalar = MinMaxScaler(feature_range=(0, 1))

dtype, device = torch.float, torch.device('cpu')

M2, M3 = 25, 5
num_feat, num_engy = M2 + M3**3, 1
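# three hidden layers of 50 units map the M2 + M3**3 = 150 input features
# to a single energy output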
mlp = [num_feat, 50, 50, 50, num_engy]
weights, biases = ct_nn.get_weights(mlp, xavier=True), ct_nn.get_biases(mlp)
optimizer = torch.optim.Adam(biases + weights, lr=1e-4)

vfeatFile = "tests\\data\\vfeat" if platform == 'win32' else "tests/data/vfeat"
vengyFile = "tests\\data\\vengy" if platform == 'win32' else "tests/data/vengy"
feat = pd.read_csv(vfeatFile, header=None).values.astype(np.float32)
engy = pd.read_csv(vengyFile,
                   header=None).values.astype(np.float32).reshape(-1, 1)
feat_scalar.fit(feat)
engy_scalar.fit(engy)
# Recover each scaler's linear map (scaled = slope * x + offset): transforming
# zeros yields the offset (feat_b, engy_b); transforming ones and subtracting
# the offset yields the slope (feat_a, engy_a).
(a, b) = feat.shape
feat_b = feat_scalar.transform(np.zeros((1, b))).astype(np.float32)
feat_a = feat_scalar.transform(np.ones((1, b))).astype(np.float32) - feat_b
engy_b = engy_scalar.transform(np.zeros((1, 1))).astype(np.float32)
engy_a = engy_scalar.transform(np.ones((1, 1))).astype(np.float32) - engy_b
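# Illustrative note (not part of the original example): the (feat_a, feat_b) and
# (engy_a, engy_b) pairs act as elementwise linear maps, so the same scaling can
# be applied to torch tensors, e.g.
#     feat_scaled = torch.from_numpy(feat_a) * torch.from_numpy(feat) + torch.from_numpy(feat_b)
# which mirrors how the later examples apply their (slope, offset) pairs.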
Example #2
import time

import torch
from pydoc import locate

from chemtorch.io import stream_structures
from chemtorch.features import get_dG_dR
# get_weights, get_biases and np2torch come from chemtorch.nn, and
# get_scalar_csv from chemtorch.ml (see the aliased imports in Example #3).
from chemtorch.nn import get_weights, get_biases, np2torch
from chemtorch.ml import get_scalar_csv

from chemtorch.parameters import settings
settings['epoch'] = 50

weights, biases = None, None
dtype, device = locate(settings['dtype']), settings['device']

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]

# initialize weights and biases if they are not provided
weights = get_weights(mlp) if weights is None else list(np2torch(*weights))
biases = get_biases(mlp) if biases is None else list(np2torch(*biases))
for (w, b) in zip(weights, biases):
    w.requires_grad, b.requires_grad = True, True

optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

feat_a, feat_b = get_scalar_csv(settings["valid_feat_file"])
engy_a, engy_b = get_scalar_csv(settings["valid_engy_file"])

feat_a, feat_b = np2torch(feat_a, feat_b, dtype=dtype)
engy_a, engy_b = np2torch(engy_a, engy_b, dtype=dtype)

for i_epoch in range(settings['epoch']):

    start = time.time()
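    # The loop body is truncated in the source; given the imports above, it
    # presumably streams the training structures with stream_structures and
    # evaluates the feature derivatives with get_dG_dR.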
Example #3
import time

import pandas as pd
import torch
from pydoc import locate

import chemtorch.nn as ct_nn
import chemtorch.io as ct_io
import chemtorch.ml as ct_ml
import chemtorch.features as ct_ft

from chemtorch.parameters import settings

dtype, device = locate(settings['dtype']), settings['device']

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
Rc = settings['Router']
weights, biases = ct_nn.get_weights(mlp), ct_nn.get_biases(mlp)
optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

feat_a, feat_b = ct_ml.get_scalar_csv(settings["valid_feat_file"])
engy_a, engy_b = ct_ml.get_scalar_csv(settings["valid_engy_file"])

feat_a, feat_b = ct_nn.np2torch(feat_a, feat_b, dtype=dtype)
engy_a, engy_b = ct_nn.np2torch(engy_a, engy_b, dtype=dtype)
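# feat_a/feat_b and engy_a/engy_b are the per-column (slope, offset) pairs of the
# min-max scaling; get_scalar_csv presumably wraps the scaler fit shown in Example #1.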

for i_epoch in range(settings['epoch']):
    start = time.time()
    # featFile = "tests\\data\\feat" if platform == 'win32' else "tests/data/feat"
    # engyFile = "tests\\data\\engy" if platform == 'win32' else "tests/data/engy"
    feat_chunk = pd.read_csv(settings['train_feat_file'],
                             header=None,
                             chunksize=settings['chunk_size'])
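    # Illustrative continuation (the original example is truncated here): pandas
    # returns an iterator of DataFrame chunks, which would typically be consumed
    # together with a matching energy chunk, e.g.
    #     for feat_df in feat_chunk:
    #         feat = feat_df.values.astype('float32')
    #         ...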
Example #4
import torch
# `grad` at the end of this fragment matches torch.autograd.grad's signature.
from torch.autograd import grad

from chemtorch.nn import get_weights, get_biases, np2torch
from chemtorch.ml import get_scalar_csv
from chemtorch.parameters import settings

# This fragment starts mid-pipeline: the two-body (phi2b) and three-body
# (phi3b_ij, phi3b_ijk) basis values, the Cartesian coordinates Rcart, and the
# network evaluator mlnn are assumed to be defined earlier.
G2 = torch.sum(phi2b, dim=1)

# shape of G3 will be: natoms x alpha x gamma x maxNb
G3 = torch.matmul(
    phi3b_ij.transpose(1, 2)[:, :, None, None, :],
    phi3b_ijk.transpose(1, 3)[:, None, :, :, :]).squeeze()

# shape of G3 will be: natoms x alpha x beta x gamma
G3 = torch.matmul(
    phi3b_ij.transpose(1, 2)[:, None, :, None, :],
    G3.transpose(2, 3)[:, :, None, :, :]).squeeze()

G = torch.cat((G2, G3.reshape(len(G3), -1)), 1)
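# G stacks the two-body features with the flattened three-body block, giving
# M2 + M3**3 columns per atom to match num_feat below.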

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat, *settings['hidden_layers'], num_engy]

# initialize the network weights and biases
weights = get_weights(mlp)
biases = get_biases(mlp)

gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])
gscalar, escalar = list(np2torch(*gscalar)), list(np2torch(*escalar))
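# apply the feature scaling as a linear map: gscalar = (slope, offset)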

G = gscalar[0] * G + gscalar[1]

E = mlnn(G, weights, biases)

F = grad(torch.sum(E), Rcart, create_graph=True)[0]
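# F holds the gradient of the total energy with respect to the Cartesian
# coordinates; create_graph=True keeps the autograd graph so this gradient can
# itself be differentiated, e.g. when forces enter the training loss.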