Code example #1
    # --- Evaluate a trained SVTNetCharge model on one held-out structure ---
    data_dir = "/public/WORK_backup/caizefeng/Datasets/STO_600_cut9_gauss16"
    test_file_path = "/public/WORK_backup/caizefeng/Datasets/STO_600_cut9_gauss16/test/10099.npy"

    # Rebuild the architecture in float64; the remaining hyperparameters are
    # presumably encoded in the checkpoint file name and recovered by
    # net_name_parse — TODO confirm against net_name_parse's contract.
    net = SVTNetCharge(num_element=3,
                       sigma_size=16,
                       **net_name_parse(net_file_name,
                                        is_temp=False)).to(torch.float64)
    # Restore the learned weights from the "nets" directory.
    net.load_state_dict(torch.load(os.path.join("nets", net_file_name)))

    # net = torch.load(os.path.join("nets", net_file_name)).cpu()

    net.eval()  # inference mode: disables dropout / batch-norm updates
    data = np.load(test_file_path)

    # load pre-calculated mean and std to make the prediction
    # (read_saved=True: statistics are read from disk, not recomputed here)
    train_mean, train_std = standardization2D(read_saved=True,
                                              data_path=data_dir)
    # Last column of the .npy array is the reference (DFT) charge density;
    # all preceding columns are input features.
    charge_dft = data[:, -1]
    with torch.no_grad():
        # Standardize the features with the training-set statistics before
        # running the forward pass.
        charge_pred = net(
            (torch.from_numpy(data[:, :-1]) - train_mean) / train_std).numpy()

    # plot: parity scatter of predicted vs. DFT charge density, with a
    # dashed y = x reference line over [0, 16].
    plt.figure(figsize=(10, 10))
    plt.scatter(charge_pred, charge_dft, c=charge_pred, cmap='viridis', s=10)
    plt.plot(np.linspace(0, 16, 50),
             np.linspace(0, 16, 50),
             ls="dashed",
             c="grey")
    plt.xlabel(r'$\rho_{\,\mathrm{pred}}$', fontsize=24)
    plt.ylabel(r'$\rho_{\,\mathrm{scf}}$', fontsize=24)
    # plt.title("Logrithm parity plot for the deep learning vs DFT charge density prediction", fontsize=16)
Code example #2
    # validation and testing dataloader: build one TensorDataset per
    # (feature file, LDOS file) pair, then serve them all from a single
    # concatenated loader.
    dataset_test_list = []
    for test_feature_file, test_ldos_file in zip(test_feature_path_list,
                                                 test_ldos_path_list):
        all_data_test = torch.from_numpy(np.load(test_feature_file))
        ldos_data_test = torch.from_numpy(np.load(test_ldos_file))
        # Drop the last feature column; keep only the first
        # net_hp["num_windows"] LDOS columns as the regression targets.
        dataset_test_list.append(
            TensorDataset(all_data_test[:, :-1],
                          ldos_data_test[:, :net_hp["num_windows"]]))
    test_iter = DataLoader(ConcatDataset(dataset_test_list),
                           **dataload_hp_test)

    # calculate mean and std over the whole training set
    # (read_saved=True: statistics are loaded from data_dir, not recomputed)
    train_mean, train_std = standardization2D(
        read_saved=True,
        data_path=data_dir,
        num_feature=num_feature,
        train_path_list=train_feature_path_list)

    # training and validating
    training_loss, batch_count = 0.0, 0
    # Pair each training feature file with its LDOS file so the pairs can be
    # shuffled together between epochs without losing the correspondence.
    train_path_list = [
        x for x in zip(train_feature_path_list, train_ldos_path_list)
    ]

    # Per-epoch scalar metrics accumulated for later reporting.
    train_scalar_list = []
    test_scalar_list = []

    net.to(device)
    for epoch in range(train_hp["num_epoch"]):
        # Reshuffle the file order every epoch so training data is visited
        # in a different file order each time.
        random.shuffle(train_path_list)
Code example #3
    dataset_test_list.append(dataset)
dataset_test = ConcatDataset(dataset_test_list)
test_iter = DataLoader(dataset_test, **dataload_hp_test)

# training dataset: one MmapDataset2D per file (presumably backed by a
# memory-mapped .npy so large files are not loaded fully into RAM —
# TODO confirm against MmapDataset2D's implementation).
dataset_train_list = []
for i, train_file in enumerate(train_path_list):
    dataset = MmapDataset2D(train_file, num_column)
    dataset_train_list.append(dataset)
dataset_train = ConcatDataset(dataset_train_list)
train_iter = DataLoader(dataset_train, **dataload_hp)

# calculate mean and std over the whole training set
# (a second loader with the test-time settings streams the training data
# once, solely to compute/load the standardization statistics)
train_iter_more = DataLoader(dataset_train, **dataload_hp_test)
train_mean, train_std = standardization2D(read_saved=True,
                                          data_path=data_dir,
                                          num_feature=num_feature,
                                          train_iter_mmap=train_iter_more)

# training
training_loss, batch_count = 0.0, 0
for epoch in range(train_hp["num_epoch"]):
    for i, (X, y) in enumerate(train_iter):
        # TensorBoard Graph: log the model graph exactly once, on the very
        # first batch of the first epoch.
        if i == 0 and epoch == 0:
            train_writer.add_graph(net, X)

        net.to(device)
        # Standardize with training-set statistics, then move the batch to
        # the target device.
        X = ((X - train_mean) / train_std).to(device)
        y = y.to(device)
        optimizer.zero_grad()
        # forward + backward + optimize