engyFile = "tests\\data\\engy" if platform == 'win32' else "tests/data/engy" featFile = "tests\\data\\feat" if platform == 'win32' else "tests/data/feat" engyFile = "tests\\data\\engy" if platform == 'win32' else "tests/data/engy" for iter in range(10): start = time.time() feat_chunk = pd.read_csv(featFile, header=None, chunksize=1000) engy_chunk = pd.read_csv(engyFile, header=None, chunksize=1000) for step, (feat, engy) in enumerate(zip(feat_chunk, engy_chunk)): feat = feat.values.astype(np.float32) engy = engy.values.astype(np.float32).reshape(-1, 1) feat_scaled = torch.from_numpy(feat_a * feat + feat_b) engy_scaled = torch.from_numpy(engy_a * engy + engy_b) nn_out = ct_nn.mlnn(feat_scaled, weights, biases, activation="sigmoid") loss = torch.sum((nn_out - engy_scaled)**2) ct_nn.mlnn_optimize(loss, optimizer) Ep = (nn_out - torch_engy_b) / torch_engy_a rmse = torch.sqrt(torch.mean((Ep - torch.from_numpy(engy))**2)) print(iter, step, rmse.data.numpy(), time.time() - start) for iter in range(2): feat_chunk = pd.read_csv(featFile, header=None, chunksize=256) engy_chunk = pd.read_csv(engyFile, header=None, chunksize=256) filename = "tests\data\MOVEMENT.train" if platform == 'win32' else "tests/data/MOVEMENT.train" mmt = ct_io.read_PWMat_movement(filename, get_forces=True, get_velocities=True,

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
Rc = settings['Router']
# optimizer = torch.optim.Adam(biases + weights, lr=settings['learning_rate'])

filename = settings['input_file']
pwmat_mmt = ct_io.stream_structures(filename, format=settings['input_format'],
                                    get_forces=True)
Rcart, lattice, atom_types, Fi, Ei = next(pwmat_mmt)
Ei = Ei - 272  # constant energy shift (presumably a reference-energy offset)

start = time.time()
Rcart, lattice, Fi, Ei = ct_nn.np2torch(Rcart, lattice, Fi, Ei, dtype=dtype)
Rcart.requires_grad = True  # needed so forces can be taken as -dE/dR
G = ct_nn.R2G(Rcart, lattice, M2=M2, M3=M3,
              Rinner=settings['Rinner'], Router=settings['Router'])
Gscaled = G * gscalar[0] + gscalar[1]
Ep = ct_nn.mlnn(Gscaled, weights, biases).squeeze()
Fp = -grad(torch.sum(Ep), Rcart, create_graph=True)[0]
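
# ct_nn.np2torch is used above as a variadic numpy-to-torch converter. A sketch
# of the assumed behavior (the real helper may differ in details):
def np2torch_sketch(*arrays, dtype=torch.float64):
    tensors = tuple(torch.as_tensor(a, dtype=dtype) for a in arrays)
    return tensors if len(tensors) > 1 else tensors[0]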

# Body of the force-matching training loop (iEpoch indexes epochs, i indexes
# structures drawn from the stream):
Rcart, lattice, Fi, Ei = ct_nn.np2torch(Rcart, lattice, Fi, Ei, dtype=dtype)
Rcart.requires_grad = True
G = ct_nn.R2G(Rcart, lattice, M2=M2, M3=M3,
              Rinner=settings['Rinner'], Router=settings['Router'])
G = G * gscalar[0] + gscalar[1]
Ep = ct_nn.mlnn(G, weights, biases).squeeze()
Fp = -grad(torch.sum(Ep), Rcart, create_graph=True)[0]
Emse = torch.mean((Ep - Ei)**2)
Fmse = torch.mean((Fp - Fi)**2)
loss = Emse + Fmse
loss.backward()
optimizer.step()
optimizer.zero_grad()
Rcart.grad.zero_()  # Rcart is not in the optimizer, so clear its grad manually
with torch.no_grad():
    print(iEpoch, i, torch.sqrt(Emse).numpy(), torch.sqrt(Fmse).numpy())
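
# Self-contained toy of the force-matching pattern above: predicted forces are
# an autograd derivative of the predicted energy, so the force term must be
# built with create_graph=True for loss.backward() to differentiate through it.
x = torch.linspace(-1.0, 1.0, 8, requires_grad=True)
w = torch.randn(1, requires_grad=True)
E_toy = torch.sum(w * x**2)                      # toy energy model
F_toy = -grad(E_toy, x, create_graph=True)[0]    # predicted forces = -dE/dx
F_ref = -2.0 * x.detach()                        # reference forces for w = 1
loss_toy = torch.mean((F_toy - F_ref)**2)
loss_toy.backward()                              # second-order backward pass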

# Energy-only training pass over pre-computed features (runs inside an epoch
# loop indexed by i_epoch):
start = time.time()
feat_chunk = pd.read_csv(settings['train_feat_file'], header=None,
                         chunksize=settings['chunk_size'])
engy_chunk = pd.read_csv(settings['train_engy_file'], header=None,
                         chunksize=settings['chunk_size'])
for step, (feat, engy) in enumerate(zip(feat_chunk, engy_chunk)):
    feat_scaled = feat_a * np2torch(feat.values, dtype=dtype) + feat_b
    engy_scaled = engy_a * np2torch(engy.values.reshape((-1, 1)), dtype=dtype) + engy_b
    nn_out = mlnn(feat_scaled, weights, biases, activation="sigmoid")
    loss = torch.sum((nn_out - engy_scaled)**2)
    mlnn_optimize(loss, optimizer)
    Ei = np2torch(engy.values.reshape((-1, 1)), dtype=dtype)
    Ep = (nn_out - engy_b) / engy_a  # undo the energy scaling
    rmse = torch.sqrt(torch.mean((Ep - Ei)**2))
print(i_epoch, torch2np(rmse), time.time() - start)

dtype, device = locate(settings['dtype']), settings['device']
M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat] + settings['hidden_layers'] + [num_engy]
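
# feat_a/feat_b, engy_a/engy_b, gscalar, escalar are all linear scaling pairs,
# applied as a * x + b and inverted as (y - b) / a. A sketch of how
# get_scalar_csv could derive such a pair, assuming min-max scaling to [0, 1]
# (the real helper may use different statistics):
def get_scalar_csv_sketch(csv_file):
    values = pd.read_csv(csv_file, header=None).values
    vmin, vmax = values.min(axis=0), values.max(axis=0)
    span = np.where(vmax > vmin, vmax - vmin, 1.0)  # guard constant columns
    a = 1.0 / span
    b = -vmin * a  # then a * x + b maps each column into [0, 1]
    return a, b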

G2 = torch.sum(phi2b, dim=1)
# shape of G3 will be: natoms x alpha x gamma x maxNb
G3 = torch.matmul(
    phi3b_ij.transpose(1, 2)[:, :, None, None, :],
    phi3b_ijk.transpose(1, 3)[:, None, :, :, :]).squeeze()
# shape of G3 will be: natoms x alpha x beta x gamma
G3 = torch.matmul(
    phi3b_ij.transpose(1, 2)[:, None, :, None, :],
    G3.transpose(2, 3)[:, :, None, :, :]).squeeze()
G = torch.cat((G2, G3.reshape(len(G3), -1)), 1)

M2, M3 = settings['M2'], settings['M3']
num_feat, num_engy = M2 + M3**3, 1
mlp = [num_feat, *settings['hidden_layers'], num_engy]
# initialize weights and biases if they are not provided
weights = get_weights(mlp)
biases = get_biases(mlp)

gscalar = get_scalar_csv(settings["valid_feat_file"])
escalar = get_scalar_csv(settings["valid_engy_file"])
gscalar, escalar = list(np2torch(*gscalar)), list(np2torch(*escalar))

G = gscalar[0] * G + gscalar[1]
E = mlnn(G, weights, biases)
F = -grad(torch.sum(E), Rcart, create_graph=True)[0]  # forces are the negative energy gradient
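
# Quick shape check of the two-step G3 contraction above, using random tensors
# with hypothetical sizes (natoms=4, M2=5, M3=3, maxNb=6); the feature width
# must come out as M2 + M3**3, matching num_feat.
natoms, nM2, nM3, maxNb = 4, 5, 3, 6
phi2b = torch.rand(natoms, maxNb, nM2)
phi3b_ij = torch.rand(natoms, maxNb, nM3)           # alpha/beta index last
phi3b_ijk = torch.rand(natoms, maxNb, maxNb, nM3)   # gamma index last
G2 = torch.sum(phi2b, dim=1)                        # natoms x M2
G3 = torch.matmul(phi3b_ij.transpose(1, 2)[:, :, None, None, :],
                  phi3b_ijk.transpose(1, 3)[:, None, :, :, :]).squeeze()
G3 = torch.matmul(phi3b_ij.transpose(1, 2)[:, None, :, None, :],
                  G3.transpose(2, 3)[:, :, None, :, :]).squeeze()
G = torch.cat((G2, G3.reshape(len(G3), -1)), 1)
assert G.shape == (natoms, nM2 + nM3**3)            # 4 x 32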