import time
from pydoc import locate
from sys import platform

import pandas as pd
import torch

import chemtorch.features as ct_ft
import chemtorch.io as ct_io
import chemtorch.ml as ct_ml
import chemtorch.nn as ct_nn
from chemtorch.parameters import settings

# Resolve the torch dtype (stored as a dotted string, e.g. "torch.float")
# and the target device from the global settings dictionary.
dtype = locate(settings['dtype'])
device = settings['device']

M2, M3 = settings['M2'], settings['M3']
# Input feature width is M2 + M3**3 per the descriptor layout; the network
# predicts a single per-atom energy.
num_feat, num_engy = M2 + M3 ** 3, 1
mlp = [num_feat, *settings['hidden_layers'], num_engy]
Rc = settings['Router']

weights = ct_nn.get_weights(mlp)
biases = ct_nn.get_biases(mlp)
optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

# Linear scaling coefficients (scale a, offset b) fitted on the validation
# feature/energy CSVs, then promoted from NumPy to torch tensors.
feat_a, feat_b = ct_ml.get_scalar_csv(settings["valid_feat_file"])
engy_a, engy_b = ct_ml.get_scalar_csv(settings["valid_engy_file"])

feat_a, feat_b = ct_nn.np2torch(feat_a, feat_b, dtype=dtype)
engy_a, engy_b = ct_nn.np2torch(engy_a, engy_b, dtype=dtype)
# Training loop: each epoch re-opens the training CSVs as chunked iterators so
# the full dataset never has to be resident in memory at once.
# NOTE(review): the loop body appears truncated in this view — the per-chunk
# forward/backward step that would consume feat_chunk/engy_chunk is not visible.
for i_epoch in range(settings['epoch']):
    start = time.time()
    # featFile = "tests\\data\\feat" if platform == 'win32' else "tests/data/feat"
    # engyFile = "tests\\data\\engy" if platform == 'win32' else "tests/data/engy"
    # header=None: the CSVs are raw numeric matrices with no column names.
    feat_chunk = pd.read_csv(settings['train_feat_file'],
                             header=None,
                             chunksize=settings['chunk_size'])
    engy_chunk = pd.read_csv(settings['train_engy_file'],
                             header=None,
                             chunksize=settings['chunk_size'])
# --- Example 2 ---
# Restore a previously trained network: one weight/bias pair per layer
# (hidden layers plus the output layer).
# NOTE(review): load_weights_biases is not defined or imported in this view —
# presumably provided by a star-import or an earlier part of the file; verify.
weights, biases = load_weights_biases(len(settings['hidden_layers'])+1)

M2 = settings['M2']
M3 = settings['M3']
Rinner = settings['Rinner']
Router = settings['Router']
# R2/R3: largest grid point of the M2-/M3-point radial grids spanning
# (Rinner, Router] — assumes a uniform grid; TODO confirm against the
# descriptor implementation.
R2 = Router - (Router - Rinner) / M2
R3 = Router - (Router - Rinner) / M3
dcut = settings['Router']


# Stream atomic structures (with forces) and pull the first one:
# Cartesian coordinates, lattice vectors, species, forces, per-atom energies.
pwmat_mmt = stream_structures(settings['input_file'], format=settings['input_format'], get_forces=True)
Rcart, lattice, atom_types, F, Ei = next(pwmat_mmt)
nAtoms = len(Rcart)

# Feature/energy scaling coefficients fitted on the validation CSVs.
gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])

dtype, device = locate(settings['dtype']), settings['device']

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3 ** 3, 1
mlp = [num_feat, *settings['hidden_layers'], num_engy]
Rc = settings['Router']

# initialize weights and biases if they are not provided
# Convert the loaded NumPy parameters to torch tensors and mark them
# trainable so autograd tracks them.
weights = list(np2torch(*weights))
biases = list(np2torch(*biases))
for (w, b) in zip(weights, biases): w.requires_grad, b.requires_grad = True, True

# convert gscalar and escalar to torch tensors
import chemtorch.io as ct_io
import chemtorch.ml as ct_ml
import chemtorch.features as ct_ft

# Hard-coded configuration for the standalone test example: CPU, float32,
# descriptor widths M2=25 / M3=5, a (num_feat, 50, 50, 1) MLP, cutoff 6.2.
dtype, device = torch.float, torch.device('cpu')

M2, M3 = 25, 5
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat, 50, 50, num_engy]
weights, biases = ct_nn.get_weights(mlp), ct_nn.get_biases(mlp)
optimizer = torch.optim.Adam(biases + weights, lr=1e-4)
Rc = 6.2

# Validation fixtures; backslash paths only for native Windows Python.
vfeatFile = "tests\\data\\vfeat" if platform == 'win32' else "tests/data/vfeat"
vengyFile = "tests\\data\\vengy" if platform == 'win32' else "tests/data/vengy"
# Linear scaling coefficients (scale a, offset b) read from the validation CSVs.
feat_a, feat_b = ct_ml.get_scalar_csv(vfeatFile)
engy_a, engy_b = ct_ml.get_scalar_csv(vengyFile)
# BUG FIX: was ct_nn.torch2np — the direction here is NumPy -> torch (the
# scalers come from get_scalar_csv and the results are named torch_*), matching
# the ct_nn.np2torch(engy_a, engy_b, dtype=dtype) call used for the identical
# conversion earlier in this file.
torch_engy_a, torch_engy_b = ct_nn.np2torch(engy_a, engy_b, dtype=dtype)

for iter in range(5):
    start = time.time()
    featFile = "tests\\data\\feat" if platform == 'win32' else "tests/data/feat"
    engyFile = "tests\\data\\engy" if platform == 'win32' else "tests/data/engy"
    feat_chunk = pd.read_csv(featFile, header=None, chunksize=1000)
    engy_chunk = pd.read_csv(engyFile, header=None, chunksize=1000)
    for step, (feat, engy) in enumerate(zip(feat_chunk, engy_chunk)):

        feat_scaled = ct_nn.np2torch(feat_a * feat.values + feat_b,
                                     dtype=dtype)
        engy_scaled = ct_nn.np2torch(engy_a * engy.values.reshape(
            (-1, 1)) + engy_b,