] idx = np.random.permutation(len(files)) idx = idx.tolist() valid_ids = [files[i] for i in idx[0:10000]] test_ids = [files[i] for i in idx[10000:20000]] train_ids = [files[i] for i in idx[20000:]] data_train = Qm9(root, train_ids, vertex_transform=utils.qm9_nodes, edge_transform=lambda g: utils.qm9_edges( g, e_representation='raw_distance')) data_valid = Qm9(root, valid_ids) data_test = Qm9(root, test_ids) print(len(data_train)) print(len(data_valid)) print(len(data_test)) print(data_train[1]) print(data_valid[1]) print(data_test[1]) start = time.time() print(utils.get_graph_stats(data_valid, 'degrees')) end = time.time() print('Time Statistics Par') print(end - start)
# --- AIChemy JSON graph-reader smoke test: load dataset, time statistics. ---
import argparse  # fix: argparse was used below but never imported
import os
import sys
import time      # fix: time.time() was used below but never imported

import torch

parser = argparse.ArgumentParser(description='QM9 Object.')
# Optional argument
parser.add_argument('--root', nargs=1, help='Specify the data directory.',
                    default=['GraphReader/'])
args = parser.parse_args()
root = args.root[0]

# Collect every *.json file directly under the data root (non-recursive).
files = [f for f in os.listdir(root)
         if os.path.isfile(os.path.join(root, f))
         and os.path.splitext(f)[-1] == ".json"]

# NOTE(review): AIChemy and utils are project-local names; assumed to be in
# scope earlier in this file — confirm their imports exist.
test = AIChemy(root, files, vertex_transform=utils.qm9_nodes,
               e_representation="raw_distance")
print(len(test))

# Time the target mean/std computation over the whole dataset.
start = time.time()
print(utils.get_graph_stats(test, ['target_mean', 'target_std']))
end = time.time()
print('Time Statistics Par')
print(end - start)