Example #1
    
    # Data
    print("Loading data...")
    # df = pd.read_pickle('./data/sample_slice_LS_BP.pkl')
    df = pd.read_pickle('./data/sample_slice_LS_BP_3T.pkl')
    X = df.loc[:, ['b0', 'b10', 'b20', 'b60', 'b150', 'b300', 'b500', 'b1000']].values.astype('float32')
    # Normalize the DW signal by S0 (the b=0 image)
    S0 = X[:,0].copy()
    for i in range(len(hp.b_values)):
      X[:,i] = X[:,i]/S0
    # Drop the b=0 column; the network works on the normalized non-zero b-values
    X = X[:,1:]
    print("Done")

    # Network
    b_values_no0 = torch.FloatTensor(hp.b_values[1:])
    net = Net(b_values_no0).to(device)

    # Restore variables from disk
    # net.load_state_dict(torch.load(hp.logdir + 'sample_slice/final_model.pt', map_location=device))
    net.load_state_dict(torch.load(hp.logdir + 'sample_slice/final_model_3T.pt', map_location=device))

    # Evaluate on the test data (inputs on the device, predictions moved back to the CPU)
    net.eval()
    with torch.no_grad():
      X_pred, Dp_pred, Dt_pred, Fp_pred = net(torch.from_numpy(X).to(device))

    df['Dp_NN'] = Dp_pred.cpu().numpy()
    df['Dt_NN'] = Dt_pred.cpu().numpy()
    df['Fp_NN'] = Fp_pred.cpu().numpy()

    # df.to_pickle('./data/sample_slice_LS_BP_NN.pkl')
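
Examples #1 and #3 are fragments: they rely on the imports, the hp settings object and the device created at the top of Example #2, and on a Net whose forward pass returns the reconstructed signal together with the three IVIM parameters. The class below is a minimal sketch of such a network, assuming the standard bi-exponential IVIM model S(b)/S0 = Fp*exp(-b*Dp) + (1 - Fp)*exp(-b*Dt); the layer layout and the abs-based parameter constraint are assumptions, not taken from phantom_train_NN.

import torch
import torch.nn as nn

class Net(nn.Module):
    # Hypothetical IVIM fitting network: maps the normalized signal at the
    # non-zero b-values to (Dp, Dt, Fp) and reconstructs the signal from them.
    def __init__(self, b_values_no0):
        super().__init__()
        self.register_buffer('b_values_no0', b_values_no0)
        n = len(b_values_no0)
        self.encoder = nn.Sequential(
            nn.Linear(n, n), nn.ELU(),
            nn.Linear(n, n), nn.ELU(),
            nn.Linear(n, 3),              # -> Dp, Dt, Fp
        )

    def forward(self, X):
        params = torch.abs(self.encoder(X))   # keep the parameters non-negative
        Dp = params[:, 0].unsqueeze(1)
        Dt = params[:, 1].unsqueeze(1)
        Fp = params[:, 2].unsqueeze(1)
        # Bi-exponential IVIM model evaluated at every non-zero b-value
        X_pred = Fp * torch.exp(-self.b_values_no0 * Dp) + \
                 (1 - Fp) * torch.exp(-self.b_values_no0 * Dt)
        return X_pred, Dp, Dt, Fp
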
Example #2
import torch
from phantom_train_NN import Net
from data_load import *  # presumably supplies hp (settings), np and the data helpers used below
import matplotlib.pyplot as plt
from time import time
from pdb import set_trace as bp

if __name__ == '__main__':
    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    torch.backends.cudnn.benchmark = True

    # Network
    b_values_no0 = torch.FloatTensor(hp.b_values[1:])
    net = Net(b_values_no0).to(device)

    # Restore variables from disk
    net.load_state_dict(
        torch.load(hp.logdir + 'phantom/final_model.pt', map_location=device))

    # One column per noise level tested below (range(15, 165, 15) gives ten levels)
    Dp_pred = np.zeros((hp.num_samples, 10))
    Dt_pred = np.zeros((hp.num_samples, 10))
    Fp_pred = np.zeros((hp.num_samples, 10))

    Dp_error = np.zeros((hp.num_samples, 10))
    Dt_error = np.zeros((hp.num_samples, 10))
    Fp_error = np.zeros((hp.num_samples, 10))

    time_start = time()
    for noise_sd in range(15, 165, 15):
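
Example #2 is cut off at the start of the noise sweep. Judging from the preallocated (hp.num_samples, 10) arrays and the ten noise levels in range(15, 165, 15), each iteration presumably simulates noisy phantom signals at one noise level, pushes them through the network and stores the predictions and errors in the corresponding column. Below is a minimal sketch under that assumption; simulate_signals and the ground-truth arrays Dp_gt, Dt_gt, Fp_gt are hypothetical placeholders, not names from the original code.

    for idx, noise_sd in enumerate(range(15, 165, 15)):
        # Hypothetical helper: normalized noisy phantom signals at this noise level,
        # b=0 column already dropped, shape (hp.num_samples, len(hp.b_values) - 1)
        X_noisy = simulate_signals(noise_sd)

        net.eval()
        with torch.no_grad():
            X_hat, Dp, Dt, Fp = net(torch.from_numpy(X_noisy.astype(np.float32)).to(device))

        Dp_pred[:, idx] = Dp.cpu().numpy().squeeze()
        Dt_pred[:, idx] = Dt.cpu().numpy().squeeze()
        Fp_pred[:, idx] = Fp.cpu().numpy().squeeze()

        # Relative errors against the known phantom ground truth (hypothetical arrays)
        Dp_error[:, idx] = np.abs(Dp_pred[:, idx] - Dp_gt) / Dp_gt
        Dt_error[:, idx] = np.abs(Dt_pred[:, idx] - Dt_gt) / Dt_gt
        Fp_error[:, idx] = np.abs(Fp_pred[:, idx] - Fp_gt) / Fp_gt

    print("Evaluation took {:.1f} s".format(time() - time_start))
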
Example #3
    # Training data
    print("Loading training data...")
    # df = pd.read_pickle('./data/sample_slice.pkl')
    df = pd.read_pickle('./data/sample_slice_3T.pkl')
    X = df.loc[:, ['b0', 'b10', 'b20', 'b60', 'b150', 'b300', 'b500', 'b1000']].values.astype('float32')
    # Normalize the DW signal by S0 (the b=0 image)
    S0 = X[:,0].copy()
    for i in range(len(hp.b_values)):
      X[:,i] = X[:,i]/S0
    # Drop the b=0 column; training uses the normalized non-zero b-values
    X = X[:,1:]
    trainloader, num_batches = get_trainloader(X)
    print("Done")

    # Network
    b_values_no0 = torch.FloatTensor(hp.b_values[1:])
    net = Net(b_values_no0).to(device)

    # Restore variables from disk
    net.load_state_dict(torch.load(hp.logdir + 'phantom/final_model_Dp02.pt', map_location=device))

    # Loss function and optimizer
    criterion = nn.MSELoss().to(device)
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    # Early-stopping bookkeeping: best loss so far and epochs without improvement
    best = 1e16
    num_bad_epochs = 0
    
    # Train
    for epoch in range(hp.num_epochs): 
      print("-----------------------------------------------------------------")