# Example #1
def golkov_multi(data, n_directions, random_seed=400):
    """Predict NODDI parameter maps voxel-wise with the 1D "golkov" model.

    Parameters
    ----------
    data : ndarray, 4D
        Diffusion data laid out (dim0, dim1, n_slices, total_directions)
        -- assumed from the indexing below; TODO confirm against callers.
    n_directions : int
        Number of diffusion directions to subsample; also selects the
        normalization file and the checkpoint to load.
    random_seed : int, optional
        Seed forwarded to the subsampling-pattern generator.

    Returns
    -------
    ndarray, shape (dim0, dim1, n_slices, 4)
        The four predicted parameter maps, rescaled by the stored per-channel
        maxima (presumably odi/fiso/ficvf/gfa -- verify against training code).
    """
    # Per-direction normalization maxima, reshaped to broadcast over the
    # three spatial axes.
    max_path = (
        "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_values_%i_directions_1d.h5" %
        n_directions)
    maxs = readhdf5.read_hdf5(max_path, "max_values")[None, None, None, :]

    # Per-output maxima used to undo the training-time target scaling.
    max_y_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_y_1d.h5"
    max_y = readhdf5.read_hdf5(max_y_path, "max_y")

    subsampling_pattern = subsampling.gensamples(n_directions,
                                                 random_seed=random_seed)

    image_size = (n_directions, )

    model = model1d_multi.fc_1d(image_size)
    model.compile(optimizer=Adam(lr=1e-3,
                                 beta_1=0.99,
                                 beta_2=0.995,
                                 epsilon=1e-08,
                                 decay=0.85),
                  loss="mean_absolute_error",
                  metrics=["accuracy"])
    model.load_weights("/v/raid1b/egibbons/models/noddi-%i_golkov_multi.h5" %
                       n_directions)
    print("golkov_multi model loaded")

    # Fancy indexing returns a copy, so the in-place normalization below
    # does not mutate the caller's array.
    data_subsampled = data[:, :, :, subsampling_pattern]

    data_subsampled /= maxs

    # Do NOT rebind n_directions here: the original shadowed the function
    # argument with the last shape element.
    dim0, dim1, n_slices, _ = data_subsampled.shape

    print("Predicting...")
    start = time.time()

    # Flatten every voxel into one row of n_directions measurements.
    x = data_subsampled.reshape(dim0 * dim1 * n_slices, -1)

    recon = model.predict(x, batch_size=10000)

    prediction = recon.reshape(dim0, dim1, n_slices, 4)

    # Undo the per-channel target normalization.
    for ii in range(4):
        prediction[:, :, :, ii] *= max_y[ii]

    print("Predictions completed...took: %f" % (time.time() - start))

    return prediction
# Example #2
def model_raw(data, n_directions, random_seed=400, loss_type="l1"):
    """Predict NODDI parameter maps slice-by-slice with the 2D model on raw data.

    Parameters
    ----------
    data : ndarray, 4D
        Diffusion data laid out (row, col, slice, total_directions)
        -- assumed from the transpose below; TODO confirm against callers.
    n_directions : int
        Number of directions to subsample; selects normalization file and
        checkpoint.
    random_seed : int, optional
        Seed forwarded to the subsampling-pattern generator.
    loss_type : str, optional
        Informational only -- printed in a log message, never used to pick
        a loss (the model is compiled with mean absolute error regardless).

    Returns
    -------
    ndarray, shape (row, col, slice, 4)
        Predicted maps, rescaled by the stored per-channel maxima.
    """
    image_size = (128, 128, n_directions)

    model = simple2d.res2d(image_size)
    model.compile(optimizer=Adam(lr=1e-3),
                  loss="mean_absolute_error",
                  metrics=["accuracy"])
    model.load_weights("/v/raid1b/egibbons/models/noddi-%i_raw.h5" %
                       (n_directions))
    print("2D dense model loaded for raw data.  Using %s loss" % loss_type)

    # Per-direction maxima with singleton spatial axes, shape
    # (1, 1, 1, n_directions), ready to broadcast against x.
    max_path = (
        "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_values_%i_directions_raw.h5"
        % n_directions)
    maxs = readhdf5.read_hdf5(max_path, "max_values")[None, None, None, :]

    max_y_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_y_raw.h5"
    max_y = readhdf5.read_hdf5(max_y_path, "max_y")

    subsampling_pattern = subsampling.gensamples(n_directions,
                                                 random_seed=random_seed)
    # Fancy indexing copies, so the in-place divide below is local.
    x = data[:, :, :, subsampling_pattern]
    x = x.transpose(2, 0, 1, 3)  # -> (slice, row, col, direction)

    # maxs already has the broadcast shape; the original squeezed and
    # re-expanded it to the identical (1, 1, 1, n) shape for no effect.
    x /= maxs

    print("Predicting 2D...")
    start = time.time()
    prediction = model.predict(x, batch_size=10).transpose(1, 2, 0, 3)
    print("Predictions completed...took: %f" % (time.time() - start))

    ### DISPLAY ###

    # Undo the per-channel target normalization.  (The original also divided
    # channel 3 by a diffusivity_scaling of 1 -- a no-op, removed.)
    for ii in range(4):
        prediction[:, :, :, ii] *= max_y[ii]

    return prediction
# Example #3
def train(n_directions):
    """Train the 2D residual network to predict GFA maps (no target scaling).

    Parameters
    ----------
    n_directions : int
        Number of diffusion directions in the input volumes; selects the
        input HDF5 file and the checkpoint filename.

    Side effects: loads data from fixed paths, trains for up to 100 epochs,
    and saves the best-validation-loss weights to a fixed path.
    """
    loss_type = "l1"

    print("running 2D network with %s loss and %i directions" %
          (loss_type, n_directions))

    n_gpu = 1
    n_epochs = 100
    batch_size = 10
    learning_rate = 1e-3

    image_size = (128, 128, n_directions)

    model = simple2d.res2d(image_size)

    optimizer = Adam(lr=learning_rate)
    if loss_type == "l1":
        model.compile(optimizer=optimizer, loss="mean_absolute_error")
    else:
        model.compile(optimizer=optimizer, loss=network_utils.perceptual_loss)

    ### DATA LOADING ###
    x_path = ("/v/raid1b/egibbons/MRIdata/DTI/noddi/x_%i_directions_2d.h5" %
              n_directions)
    y_gfa_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_gfa_2d.h5"

    print("Loading data...")

    start = time.time()
    y_gfa = readhdf5.read_hdf5(y_gfa_path, "y_gfa")

    y = y_gfa

    x = readhdf5.read_hdf5(x_path, "x_%i_directions" % n_directions)

    print("Data is loaded...took: %f seconds" % (time.time() - start))

    ### MODEL FITTING ###
    batch_size_multi_gpu = n_gpu * batch_size

    # NOTE(review): the original also built TensorBoard and ReduceLROnPlateau
    # callbacks here but never passed them to fit(); removed as dead code.
    save_path = ("/v/raid1b/egibbons/models/noddi-%i_2d_gfa_no_scale.h5" %
                 (n_directions))
    print("saving to: %s" % save_path)
    # Keep only the best validation-loss weights on disk.
    checkpointer = ModelCheckpoint(filepath=save_path,
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   period=1)

    # Learning rate follows the project-wide step-decay schedule.
    lrate = LearningRateScheduler(network_utils.step_decay)

    stopping = EarlyStopping(monitor='val_loss',
                             min_delta=0,
                             patience=20,
                             verbose=0,
                             mode='auto')

    model.fit(
        x=x,
        y=y,
        batch_size=batch_size_multi_gpu,
        epochs=n_epochs,
        verbose=2,
        callbacks=[checkpointer, lrate, stopping],
        validation_split=0.2,
        shuffle=True,
    )

    print("trained %i direction model" % n_directions)
# Example #4
sys.path.append("/home/mirl/egibbons/noddi")
from noddi_utils import metrics
from noddi_utils import noddistudy
from utils import display
from utils import readhdf5

# Patients held out for evaluation.
test_cases = [
    "P032315", "P061815", "P020916", "N011118A", "N011118B", "P072216",
    "P082616"
]

directions = [128, 64, 32, 24, 16]

max_y_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_y_2d.h5"
max_y = readhdf5.read_hdf5(max_y_path, "max_y")

models = ["2d", "separate_2d"]
data_types = ["odi", "fiso", "ficvf", "gfa"]
measurements = ["SSIM", "PSNR", "NRMSE"]

# Nested accumulator: model -> output map -> metric -> pair of empty lists.
data = {
    model_type: {
        data_type: {measurement_type: ([], [])
                    for measurement_type in measurements}
        for data_type in data_types
    }
    for model_type in models
}
from utils import readhdf5

# Load the 32-direction training tensors and save each input channel of one
# slice as a PDF.  (This fragment is the body of a lost "Example #5" header.)
n_directions = 32

x_path = ("/v/raid1b/egibbons/MRIdata/DTI/noddi/x_%i_directions_2d.h5" %
          n_directions)
y_odi_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_odi_2d.h5"
y_fiso_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_fiso_2d.h5"
y_ficvf_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_ficvf_2d.h5"
y_gfa_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_gfa_2d.h5"

print("Loading data...")

start = time.time()
y_odi = readhdf5.read_hdf5(y_odi_path,"y_odi")
y_fiso = readhdf5.read_hdf5(y_fiso_path,"y_fiso")
y_ficvf = readhdf5.read_hdf5(y_ficvf_path,"y_ficvf")
y_gfa = readhdf5.read_hdf5(y_gfa_path,"y_gfa")

# Swap the two in-plane axes and flip both; presumably reorients the images
# for display -- TODO confirm the intended anatomical orientation.
x = readhdf5.read_hdf5(x_path,"x_%i_directions" % n_directions).transpose(0,2,1,3)[:,::-1,::-1,:]
# Stack the four targets along the channel axis and reorient the same way.
y = np.concatenate((y_odi, y_fiso, y_ficvf, y_gfa),
                   axis=3).transpose(0,2,1,3)[:,::-1,::-1,:]


# Save each direction channel of sample 25 as a grayscale PDF figure.
for ii in range(x.shape[3]):
    plt.figure()
    plt.imshow(x[25,:,:,ii].squeeze(),cmap="gray")
    plt.axis("off")
    plt.axis("equal")
    plt.savefig("../results/x_%i.pdf" % ii, bbox_inches="tight")
# Example #6
def train(n_directions):
    """Train the 1D fully-connected model on per-voxel diffusion signals.

    Parameters
    ----------
    n_directions : int
        Number of diffusion directions; sizes the network input and selects
        the input HDF5 file and checkpoint filename.

    Side effects: loads data from fixed paths, trains for up to 100 epochs
    with MSE loss, and periodically saves the best weights to a fixed path.
    """
    print("running network with %i directions" % n_directions)

    n_gpu = 1
    n_epochs = 100
    batch_size = 10000
    learning_rate = 1e-3

    image_size = (n_directions, )

    model = model1d.fc_1d(image_size)

    optimizer = Adam(lr=learning_rate)

    model.compile(
        optimizer=optimizer,
        loss="mean_squared_error",
    )

    x_path = ("/v/raid1b/egibbons/MRIdata/DTI/noddi/x_%i_directions_1d.h5" %
              n_directions)
    y_odi_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_odi_1d.h5"
    y_fiso_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_fiso_1d.h5"
    y_ficvf_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_ficvf_1d.h5"
    y_gfa_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/y_gfa_1d.h5"

    print("Loading data...")
    x = readhdf5.read_hdf5(x_path, "x_%i_directions" % n_directions)
    y_odi = readhdf5.read_hdf5(y_odi_path, "y_odi")
    y_fiso = readhdf5.read_hdf5(y_fiso_path, "y_fiso")
    y_ficvf = readhdf5.read_hdf5(y_ficvf_path, "y_ficvf")
    y_gfa = readhdf5.read_hdf5(y_gfa_path, "y_gfa")
    print("Data is loaded...")

    # Debug output retained from the original.
    print(y_odi.shape)
    print(y_fiso.shape)
    print(y_ficvf.shape)
    print(y_gfa.shape)

    # One column per target: (odi, fiso, ficvf, gfa).
    y = np.concatenate((y_odi, y_fiso, y_ficvf, y_gfa), axis=1)

    batch_size_multi_gpu = n_gpu * batch_size

    # NOTE(review): the original also built a TensorBoard callback here but
    # never passed it to fit(); removed as dead code.
    save_path = ("/v/raid1b/egibbons/models/noddi-%i_golkov_multi.h5" %
                 n_directions)
    # Checkpoint at most every 25 epochs, keeping only the best val_loss.
    checkpointer = ModelCheckpoint(filepath=save_path,
                                   verbose=1,
                                   monitor="val_loss",
                                   save_best_only=True,
                                   save_weights_only=True,
                                   period=25)

    reduce_lr = ReduceLROnPlateau(monitor="val_loss",
                                  factor=0.1,
                                  patience=5,
                                  min_lr=1e-7)

    stopping = EarlyStopping(monitor="val_loss",
                             min_delta=0,
                             patience=30,
                             verbose=0,
                             mode='auto')

    model.fit(
        x=x,
        y=y,
        batch_size=batch_size_multi_gpu,
        epochs=n_epochs,
        verbose=2,
        callbacks=[checkpointer, reduce_lr, stopping],
        validation_split=0.2,
        shuffle=True,
    )

    print("trained %i direction model" % n_directions)
# Example #7
def separate_2d(data,
                n_directions,
                random_seed=400,
                loss_type="l1",
                scaling=True):
    """Predict NODDI maps with two separate 2D models (NODDI and GFA).

    Parameters
    ----------
    data : ndarray, 4D
        Diffusion data laid out (row, col, slice, total_directions)
        -- assumed from the transpose below; TODO confirm against callers.
    n_directions : int
        Number of directions to subsample; selects normalization files and
        checkpoints.
    random_seed : int, optional
        Seed forwarded to the subsampling-pattern generator.
    loss_type : str, optional
        Informational only -- printed, never used to pick a loss.
    scaling : bool, optional
        True loads the scaled-GFA checkpoint and divides its output by 5;
        False loads the unscaled checkpoint and leaves it as-is.

    Returns
    -------
    ndarray, shape (row, col, slice, 4)
        NODDI maps (3 channels) concatenated with GFA (1 channel), rescaled
        by the stored per-channel maxima.
    """
    # Per-direction maxima with singleton spatial axes, shape
    # (1, 1, 1, n_directions), ready to broadcast against x.
    max_path = (
        "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_values_%i_directions_2d.h5" %
        n_directions)
    maxs = readhdf5.read_hdf5(max_path, "max_values")[None, None, None, :]

    max_y_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_y_2d.h5"
    max_y = readhdf5.read_hdf5(max_y_path, "max_y")

    subsampling_pattern = subsampling.gensamples(n_directions,
                                                 random_seed=random_seed)
    # Fancy indexing copies, so the in-place divide below is local.
    x = data[:, :, :, subsampling_pattern]
    x = x.transpose(2, 0, 1, 3)  # -> (slice, row, col, direction)

    # maxs already has the broadcast shape; the original squeezed and
    # re-expanded it to the identical (1, 1, 1, n) shape for no effect.
    x /= maxs

    # Independent copies so each model sees pristine input.
    x_noddi = np.copy(x)
    x_gfa = np.copy(x)

    image_size = (128, 128, n_directions)

    # noddi model
    model_noddi = simple2d_noddi.res2d(image_size)
    model_noddi.compile(optimizer=Adam(lr=1e-3),
                        loss="mean_absolute_error",
                        metrics=["accuracy"])
    model_noddi.load_weights("/v/raid1b/egibbons/models/noddi-%i_2d_noddi.h5" %
                             (n_directions))

    model_gfa = simple2d_gfa.res2d(image_size)
    model_gfa.compile(optimizer=Adam(lr=1e-3),
                      loss="mean_absolute_error",
                      metrics=["accuracy"])

    # Idiomatic truthiness check (original used "is True", which silently
    # took the else-branch for truthy non-bool values).
    if scaling:
        model_gfa.load_weights("/v/raid1b/egibbons/models/noddi-%i_2d_gfa.h5" %
                               (n_directions))
        scaling_factor = 5
    else:
        print("no scaling")
        model_gfa.load_weights(
            "/v/raid1b/egibbons/models/noddi-%i_2d_gfa_no_scale.h5" %
            (n_directions))
        scaling_factor = 1

    print("2D dense model loaded.  Using %s loss" % loss_type)

    print("Predicting 2D separate...")
    start = time.time()
    prediction_noddi = model_noddi.predict(x_noddi, batch_size=10).transpose(
        1, 2, 0, 3)
    prediction_gfa = model_gfa.predict(x_gfa,
                                       batch_size=10).transpose(1, 2, 0, 3)
    print("Predictions completed...took: %f" % (time.time() - start))

    prediction = np.concatenate(
        (prediction_noddi, prediction_gfa / scaling_factor), axis=3)

    ### DISPLAY ###
    # Undo the per-channel target normalization.
    for ii in range(4):
        prediction[:, :, :, ii] *= max_y[ii]

    return prediction
# Example #8
from noddi_utils import noddistudy
from noddi_utils import subsampling
from noddi_utils import predict
from utils import display
from utils import readhdf5

loss_type = "l1"

n_channels = 64

### LOAD DATA ###
# Load one patient's full data plus the four ground-truth parameter maps.
patient_number = "P111816"
noddi_data = noddistudy.NoddiData(patient_number)

# Per-channel maxima used both for clipping here and (presumably) for the
# model's target normalization -- order appears to be odi/fiso/ficvf/gfa.
max_y_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_y_2d.h5"
max_y = readhdf5.read_hdf5(max_y_path, "max_y")

data_full = noddi_data.get_full()
data_odi = noddi_data.get_odi()
data_fiso = noddi_data.get_fiso()
data_ficvf = noddi_data.get_ficvf()
data_gfa = noddi_data.get_gfa()
# data_md = noddi_data.get_md()
# data_ad = noddi_data.get_ad()
# data_fa = noddi_data.get_fa()

# Clip each ground-truth map at its stored training-time maximum so the
# targets match what the network was trained on.
data_odi[data_odi > max_y[0]] = max_y[0]
data_fiso[data_fiso > max_y[1]] = max_y[1]
data_ficvf[data_ficvf > max_y[2]] = max_y[2]
data_gfa[data_gfa > max_y[3]] = max_y[3]
# data_md[data_md>max_y[4]] = max_y[4]
# Example #9
import numpy as np

sys.path.append("/home/mirl/egibbons/noddi")
from noddi_utils import noddistudy
from recon import imtools
from utils import matutils
from utils import readhdf5

# Prepare a single reoriented, cropped GFA slice for display/comparison.
patient_number = "P080715"

slice_use = 20

directions = [128, 64, 32, 24, 16, 8]

# Per-channel maxima; index 3 is the GFA clip value.
max_y_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_y_2d.h5"
max_y = readhdf5.read_hdf5(max_y_path, "max_y")

data = {}
models = ["2d", "separate_no_scale_2d"]

noddi_data = noddistudy.NoddiData(patient_number)
# Swap in-plane axes and flip both -- presumably reorients for display;
# TODO confirm the intended anatomical orientation.
data_gfa = noddi_data.get_gfa()[:, :, slice_use].transpose(1, 0)[::-1, ::-1]
# Clip at the stored training-time maximum.
data_gfa[data_gfa > max_y[3]] = max_y[3]

# Shift and center-crop the slice, then rescale intensities by 2x
# (dividing by 0.5) for display.
crop_factor = 100
roll_factor = -5
data_gfa = np.roll(data_gfa, roll_factor, axis=0)
data_gfa = imtools.Crop(data_gfa, crop_factor, crop_factor)
data_gfa /= 0.5

# Dynamic range of the processed slice (e.g. for a common display window).
range_gfa = np.amax(data_gfa) - np.amin(data_gfa)
# Example #10
# Load one test case's targets and the 24-direction 2D model.
# NOTE(review): test_cases, subsampling, noddistudy, readhdf5, simple2d,
# Adam and loss_type are defined earlier in the original script (not shown).
n_directions = 24

sampling_pattern = subsampling.gensamples(n_directions)

### LOAD DATA ###
patient_number = test_cases[0]
noddi_data = noddistudy.NoddiData(patient_number)

data_full = noddi_data.get_full()
# Swap the in-plane axes and flip both -- presumably reorients the maps
# for display; TODO confirm the intended orientation.
data_odi = noddi_data.get_odi().transpose(1,0,2)[::-1,::-1]
data_fiso = noddi_data.get_fiso().transpose(1,0,2)[::-1,::-1]
data_ficvf = noddi_data.get_ficvf().transpose(1,0,2)[::-1,::-1]
data_gfa = noddi_data.get_gfa().transpose(1,0,2)[::-1,::-1]

# Per-channel maxima used to undo the training-time target scaling.
max_y_path = "/v/raid1b/egibbons/MRIdata/DTI/noddi/max_y_2d.h5"
max_y = readhdf5.read_hdf5(max_y_path,"max_y")

### LOAD MODEL ###

image_size = (128,128,n_directions)

model = simple2d.res2d(image_size)
model.compile(optimizer=Adam(lr=1e-3),
              loss="mean_absolute_error",
              metrics=["accuracy"])
# Note: unlike the other examples, this loads a relative-path checkpoint.
model.load_weights("noddi_test-%i_2d.h5" %
                   (n_directions))
print("2D dense model loaded.  Using %s loss" % loss_type)

    
max_path = ("/v/raid1b/egibbons/MRIdata/DTI/noddi/"