Example #1
def infer(batch_size=2):
    # Directory on the server containing the PET and PCT data
    image_dir = "/hepgpu3-data1/dmcsween/DataTwoWay128/fixed"
    print("Load Data")
    image_data, __image, __label = load.data_reader(image_dir, image_dir, image_dir)

    image_array, image_affine = image_data.get_data()
    moving_array, moving_affine = __image.get_data()
    dvf_array, dvf_affine = __label.get_data()

    list_avail_keys = help.get_moveable_keys(image_array)
    # Get hamming set
    print("Load hamming Set")
    hamming_set = pd.read_csv("hamming_set.txt", sep=",", header=None)
    print(hamming_set)
    # Ignore moving and dvf
    validation_dataset, validation_moving, validation_dvf, train_dataset, train_moving, train_dvf = helper.split_data(
        image_array, moving_array, dvf_array, split_ratio=0.15)
    print("Valid Shape:", validation_dataset.shape)
    normalised_dataset = helper.normalise(validation_dataset)
    print('Load models')
    idx_list = [0, 9]
    K.clear_session()
    model = load_model('./logs/best_model.h5')
    myPredictGen = gen.predict_generator(
        normalised_dataset, list_avail_keys, hamming_set, hamming_idx=idx_list, batch_size=batch_size, N=10)
    opt = optimizers.SGD(lr=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=["accuracy"])
    output = model.predict_generator(generator=myPredictGen, steps=1, verbose=1)
    print(output)
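
Note: helper.normalise itself is not shown in these examples. As a point of reference, here is a minimal sketch of what a per-volume intensity normalisation of this kind might look like, assuming simple min-max scaling; the project's actual helper may use another scheme.

import numpy as np

def normalise(dataset):
    # Hypothetical min-max scaling of each volume to [0, 1];
    # the real helper.normalise may differ (e.g. z-scoring).
    dataset = dataset.astype(np.float32)
    lo = dataset.min(axis=tuple(range(1, dataset.ndim)), keepdims=True)
    hi = dataset.max(axis=tuple(range(1, dataset.ndim)), keepdims=True)
    return (dataset - lo) / (hi - lo + 1e-8)
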
Example #2
def train(batch_size=2):
    # Load DATA
    fixed_image, moving_image, dvf_label = load.data_reader(fixed_dir, moving_dir, dvf_dir)

    # Turn into numpy arrays
    fixed_array, fixed_affine = fixed_image.get_data()
    moving_array, moving_affine = moving_image.get_data()
    dvf_array, dvf_affine = dvf_label.get_data(is_image=False)
    # Shuffle arrays
    fixed_array, moving_array, dvf_array = helper.shuffle_inplace(
        fixed_array, moving_array, dvf_array)
    fixed_affine, moving_affine, dvf_affine = helper.shuffle_inplace(
        fixed_affine, moving_affine, dvf_affine)
    # Split into test and training set
    # Training/Validation/Test = 80/15/5 split
    test_fixed, test_moving, test_dvf, train_fixed, train_moving, train_dvf = helper.split_data(
        fixed_array, moving_array, dvf_array, split_ratio=0.05)
    # Split the affine matrices the same way
    test_fixed_affine, test_moving_affine, test_dvf_affine, train_fixed_affine, train_moving_affine, train_dvf_affine = helper.split_data(
        fixed_affine, moving_affine, dvf_affine, split_ratio=0.05)
    # Split training into validation and training set
    validation_fixed, validation_moving, validation_dvf, train_fixed, train_moving, train_dvf = helper.split_data(
        train_fixed, train_moving, train_dvf, split_ratio=0.15)

    print("PCT Shape:", train_fixed.shape)
    print("PET Shape:", train_moving.shape)
    print("DVF Shape:", train_dvf.shape)
    outputPath = './transfer_logs/'
    # Callbacks
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                  patience=5)
    history = LossHistory()
    checkpoint = ModelCheckpoint(outputPath + 'best_model.h5', monitor='val_loss',
                                 verbose=1, save_best_only=True, period=1)
    tensorboard = TrainValTensorBoard(write_graph=False, log_dir=outputPath)
    callbacks = [reduce_lr, history, checkpoint, tensorboard]

    # Train
    model = buildNet(train_fixed.shape[1:])
    for layer in model.layers:
        print(layer.name, layer.output_shape)

    print(model.summary())
    plot_model(model, to_file=outputPath + 'model.png', show_shapes=True)
    opt = optimizers.Adam(lr=0.0001)
    model.compile(optimizer=opt, loss='mean_squared_error')
    model.fit_generator(generator=helper.generator(inputs=[train_fixed, train_moving], label=train_dvf, batch_size=batch_size),
                        steps_per_epoch=math.ceil(train_fixed.shape[0]/batch_size),
                        epochs=500, verbose=1,
                        callbacks=callbacks,
                        validation_data=helper.generator(
                            inputs=[validation_fixed, validation_moving], label=validation_dvf, batch_size=batch_size),
                        validation_steps=math.ceil(validation_fixed.shape[0]/batch_size))

    # accuracy = model.evaluate_generator(generator(
    #    inputs=[validation_fixed, validation_moving], label=validation_dvf, batch_size=batch_size), steps=1, verbose=1)
    model.save(outputPath + 'model.h5')
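
helper.generator is consumed by fit_generator above, so it presumably yields ([fixed_batch, moving_batch], dvf_batch) tuples indefinitely. A minimal sketch under that assumption; the names and sampling strategy are illustrative, not the project's actual implementation.

import numpy as np

def generator(inputs, label, batch_size=2):
    # Hypothetical endless batch generator for the two-input model:
    # inputs = [fixed_array, moving_array], label = dvf_array.
    fixed, moving = inputs
    n = fixed.shape[0]
    while True:
        idx = np.random.randint(0, n, size=batch_size)
        yield [fixed[idx], moving[idx]], label[idx]
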
Example #3
def train():
    # Load DATA
    fixed_image, moving_image, dvf_label = load.data_reader(
        fixed_dir, moving_dir, dvf_dir)

    # Turn into numpy arrays
    fixed_array, fixed_affine = fixed_image.get_data()
    moving_array, moving_affine = moving_image.get_data()
    dvf_array, dvf_affine = dvf_label.get_data(is_image=False)
    # Shuffle arrays
    fixed_array, moving_array, dvf_array = helper.shuffle_inplace(
        fixed_array, moving_array, dvf_array)
    fixed_affine, moving_affine, dvf_affine = helper.shuffle_inplace(
        fixed_affine, moving_affine, dvf_affine)
    # Split into test and training set
    # Training/Validation/Test = 80/15/5 split
    test_fixed, test_moving, test_dvf, train_fixed, train_moving, train_dvf = helper.split_data(
        fixed_array, moving_array, dvf_array, split_ratio=0.05)
    # Split the affine matrices the same way
    test_fixed_affine, test_moving_affine, test_dvf_affine, train_fixed_affine, train_moving_affine, train_dvf_affine = helper.split_data(
        fixed_affine, moving_affine, dvf_affine, split_ratio=0.05)
    # Split training into validation and training set
    validation_fixed, validation_moving, validation_dvf, train_fixed, train_moving, train_dvf = helper.split_data(
        train_fixed, train_moving, train_dvf, split_ratio=0.15)

    print("PCT Shape:", train_fixed.shape)
    print("PET Shape:", train_moving.shape)
    print("DVF Shape:", train_dvf.shape)

    # CNN Structure
    fixed_image = Input(
        shape=(train_fixed.shape[1:]))  # Ignore batch but include channel
    moving_image = Input(shape=(train_moving.shape[1:]))

    # Correlation layers
    correlation_out = myLayer.correlation_layer(fixed_image,
                                                moving_image,
                                                shape=train_fixed.shape[1:4],
                                                max_displacement=20,
                                                stride=2)

    x1 = Conv3D(64, (3, 3, 3),
                strides=2,
                activation=activation,
                padding='same',
                name='downsample1')(correlation_out)
    x1 = Conv3D(32, (3, 3, 3),
                strides=2,
                activation=activation,
                padding='same',
                name='downsample2')(x1)
    x1 = Conv3D(16, (3, 3, 3),
                strides=2,
                activation=activation,
                padding='same',
                name='downsample3')(x1)
    x1 = BatchNormalization(axis=-1, momentum=momentum)(x1)

    x1 = Conv3D(64, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_1a')(x1)
    x1 = Conv3D(64, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_1b')(x1)
    x1 = Conv3D(64, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_1c')(x1)
    x1 = BatchNormalization(axis=-1, momentum=momentum)(x1)

    x = MaxPooling3D(pool_size=(2, 2, 2), padding='same', name='Pool_1')(x1)

    x2 = Conv3D(128, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_2a')(x)
    x2 = Conv3D(128, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_2b')(x2)
    x2 = Conv3D(128, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_2c')(x2)
    x2 = BatchNormalization(axis=-1, momentum=momentum)(x2)

    x = MaxPooling3D(pool_size=(2, 2, 2), padding='same', name='Pool_2')(x2)

    x3 = Conv3D(256, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_3a')(x)
    x3 = Conv3D(256, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_3b')(x3)
    x3 = BatchNormalization(axis=-1, momentum=momentum)(x3)

    x = MaxPooling3D(pool_size=(2, 2, 2), padding='same', name='Pool_3')(x3)

    x4 = Conv3D(512, (3, 3, 3),
                activation=activation,
                padding='same',
                name='down_4a')(x)

    x = UpSampling3D(size=(2, 2, 2), name='UpSamp_4')(x4)
    y3 = Conv3DTranspose(256, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_3a')(x)
    y3 = Conv3DTranspose(256, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_3b')(y3)
    y3 = Conv3DTranspose(256, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_3c')(y3)
    y3 = BatchNormalization()(y3)

    merge3 = concatenate([x3, y3])

    x = UpSampling3D(size=(2, 2, 2), name='UpSamp_3')(merge3)
    y2 = Conv3DTranspose(128, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_2a')(x)
    y2 = Conv3DTranspose(128, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_2b')(y2)
    y2 = Conv3DTranspose(128, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_2c')(y2)
    y2 = BatchNormalization(axis=-1, momentum=momentum)(y2)

    merge2 = concatenate([x2, y2])

    x = UpSampling3D(size=(2, 2, 2), name='UpSamp_2')(merge2)
    y1 = Conv3DTranspose(64, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_1a')(x)
    y1 = Conv3DTranspose(64, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_1b')(y1)
    y1 = Conv3DTranspose(64, (3, 3, 3),
                         activation=activation,
                         padding='same',
                         name='Up_1c')(y1)
    y1 = BatchNormalization(axis=-1, momentum=momentum)(y1)

    merge1 = concatenate([x1, y1])

    # Transform into flow field (from VoxelMorph Github)
    upsample = Conv3DTranspose(64, (3, 3, 3),
                               strides=2,
                               activation=activation,
                               padding='same',
                               name='upsample_dvf1')(merge1)
    upsample = Conv3DTranspose(64, (3, 3, 3),
                               strides=2,
                               activation=activation,
                               padding='same',
                               name='upsample_dvf2')(upsample)
    upsample = Conv3DTranspose(64, (3, 3, 3),
                               strides=2,
                               activation=activation,
                               padding='same',
                               name='upsample_dvf3')(upsample)
    upsample = BatchNormalization(axis=-1, momentum=momentum)(upsample)

    dvf = Conv3D(64,
                 kernel_size=3,
                 activation=activation,
                 padding='same',
                 name='dvf_64features')(upsample)
    #dvf = Conv3D(3, kernel_size=3, activation=activation, padding='same', name='dvf')(dvf)
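    # A final 1x1x1 convolution maps the 64 feature channels to a 3-channel
    # displacement vector field (one displacement component per spatial axis).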
    dvf = Conv3D(3, kernel_size=1, activation=None, padding='same',
                 name='dvf')(dvf)

    # Callbacks
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=5,
                                  min_lr=0.00001)
    history = LossHistory()
    checkpoint = ModelCheckpoint('best_model.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 period=1)
    tensorboard = TrainValTensorBoard(write_graph=False)
    callbacks = [reduce_lr, history, checkpoint, tensorboard]

    # Train
    model = Model(inputs=[fixed_image, moving_image], outputs=dvf)
    for layer in model.layers:
        print(layer.name, layer.output_shape)

    # print(model.summary())
    plot_model(model, to_file='model.png')
    #Adam = optimizers.Adam(lr=0.001)
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.fit_generator(
        generator=helper.generator(inputs=[train_fixed, train_moving],
                                   label=train_dvf,
                                   batch_size=batch_size),
        steps_per_epoch=math.ceil(train_fixed.shape[0] / batch_size),
        epochs=75,
        verbose=1,
        callbacks=callbacks,
        validation_data=helper.generator(
            inputs=[validation_fixed, validation_moving],
            label=validation_dvf,
            batch_size=batch_size),
        validation_steps=math.ceil(validation_fixed.shape[0] / batch_size))

    # accuracy = model.evaluate_generator(generator(
    #    inputs=[validation_fixed, validation_moving], label=validation_dvf, batch_size=batch_size), steps=1, verbose=1)
    model.save('model.h5')
    """Testing to see where issue with DVF is """
    dvf = model.predict(helper.generator([test_fixed, test_moving],
                                         label=test_dvf,
                                         predict=True,
                                         batch_size=1),
                        steps=math.ceil(test_fixed.shape[0] / batch_size),
                        verbose=1)
    helper.write_images(test_fixed,
                        test_fixed_affine,
                        file_path='./outputs/',
                        file_prefix='fixed')
    helper.write_images(test_moving,
                        test_moving_affine,
                        file_path='./outputs/',
                        file_prefix='moving')
    helper.write_images(dvf,
                        test_fixed_affine,
                        file_path='./outputs/',
                        file_prefix='dvf')
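
From its call sites, helper.split_data appears to return the held-out arrays first and the remaining (training) arrays second. A minimal sketch under that assumption, using a plain leading-slice split; the real helper may select samples differently.

import math

def split_data(fixed, moving, dvf, split_ratio=0.05):
    # Hypothetical split: the first split_ratio fraction of samples is
    # held out, the remainder is returned as the training portion.
    n_split = math.ceil(fixed.shape[0] * split_ratio)
    return (fixed[:n_split], moving[:n_split], dvf[:n_split],
            fixed[n_split:], moving[n_split:], dvf[n_split:])
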
Example #4
def get_data(fixed_dir, moving_dir, dvf_dir):
    # Load data from directory
    fixed, moving, dvf = load.data_reader(fixed_dir, moving_dir, dvf_dir)
    fixed_array, fixed_affine = fixed.get_data()
    return fixed_array, fixed_affine
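
A short usage sketch for the helper above; the directory paths are placeholders rather than paths from the project.

fixed_array, fixed_affine = get_data("data/fixed", "data/moving", "data/dvf")
print("Fixed volume shape:", fixed_array.shape)
print("Fixed affine:\n", fixed_affine)
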
Example #5
def train(tileSize=64, numPuzzles=23, num_permutations=10, batch_size=16):
    # Directory on the server containing the PET and PCT data
    image_dir = "/hepgpu3-data1/dmcsween/Data128/ResampleData/PlanningCT"

    print("Load Data")
    image_data, __image, __label = load.data_reader(image_dir, image_dir,
                                                    image_dir)

    image_array, image_affine = image_data.get_data()
    moving_array, moving_affine = __image.get_data()
    dvf_array, dvf_affine = __label.get_data()
    """
    list_avail_keys = help.get_moveable_keys(image_array)
    hamming_set = pd.read_csv(
        "hamming_set_PCT.txt", sep=",", header=None)
    """
    avail_keys = pd.read_csv("avail_keys_both.txt", sep=",", header=None)
    print("Len keys:", len(avail_keys))
    list_avail_keys = [(avail_keys.loc[i, 0], avail_keys.loc[i, 1],
                        avail_keys.loc[i, 2]) for i in range(len(avail_keys))]
    print(list_avail_keys)
    # Get hamming set
    print("Load hamming Set")
    hamming_set = pd.read_csv("hamming_set.txt", sep=",", header=None)

    #hamming_set = hamming_set.loc[:9]
    print("Ham Len", len(hamming_set))
    print(hamming_set)

    fixed_array, moving_array, dvf_array = helper.shuffle_inplace(
        image_array, moving_array, dvf_array)

    # Ignore moving and dvf
    validation_dataset, validation_moving, validation_dvf, train_dataset, train_moving, train_dvf = helper.split_data(
        fixed_array, moving_array, dvf_array, split_ratio=0.15)

    normalised_train = helper.norm(train_dataset)
    normalised_val = helper.norm(validation_dataset)
    # Output all data from a training session into a dated folder
    outputPath = "./logs"
    # hamming_list = [0, 1, 2, 3, 4]
    # img_idx = [0, 1, 2, 3, 4]
    # callbacks
    checkpoint = ModelCheckpoint(outputPath + '/best_model.h5',
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 period=1)
    reduce_lr_plateau = ReduceLROnPlateau(monitor='val_acc',
                                          patience=10,
                                          verbose=1)
    # early_stop = EarlyStopping(monitor='val_acc', patience=5, verbose=1)
    tensorboard = TrainValTensorBoard(write_graph=False)
    callbacks = [checkpoint, reduce_lr_plateau, tensorboard]
    # BUILD Model
    model = createSharedAlexnet3D_onemodel()
    # for layer in model.layers:
    #     print(layer.name, layer.output_shape)
    opt = optimizers.SGD(lr=0.01)
    plot_model(model, to_file='model.png')
    print(model.summary())
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit_generator(generator=gen.generator(normalised_train,
                                                list_avail_keys,
                                                hamming_set,
                                                batch_size=batch_size,
                                                N=num_permutations),
                        epochs=1000,
                        verbose=1,
                        steps_per_epoch=normalised_train.shape[0] //
                        batch_size,
                        validation_data=gen.generator(normalised_val,
                                                      list_avail_keys,
                                                      hamming_set,
                                                      batch_size=batch_size,
                                                      N=num_permutations),
                        validation_steps=normalised_val.shape[0] // batch_size,
                        callbacks=callbacks,
                        shuffle=False)
    model.save('model_best.h5')
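
The generator in this example feeds a permutation classifier trained with categorical cross-entropy, so each batch presumably pairs shuffled patches with a one-hot label indicating which row of hamming_set was applied. A minimal sketch of that label construction; this is an assumption about gen.generator, not its actual code.

import numpy as np

def one_hot_permutation_label(perm_idx, num_permutations=10):
    # Hypothetical label: the network predicts which of the
    # num_permutations rows of hamming_set was used to shuffle the input.
    label = np.zeros(num_permutations, dtype=np.float32)
    label[perm_idx] = 1.0
    return label

print(one_hot_permutation_label(3))  # -> [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
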
Example #6
def inference():
    print('Load data to Transform')
    fixed_predict, moving_predict, dvf_label = load.data_reader(
        fixed_dir, moving_dir, dvf_dir)

    print('Turn into numpy arrays')
    fixed_array, fixed_affine = fixed_predict.get_data()
    moving_array, moving_affine = moving_predict.get_data()
    dvf_array, dvf_affine = dvf_label.get_data(is_image=False)

    print('Shuffle')
    fixed_array, moving_array, dvf_array = helper.shuffle_inplace(
        fixed_array, moving_array, dvf_array)
    fixed_affine, moving_affine, dvf_affine = helper.shuffle_inplace(
        fixed_affine, moving_affine, dvf_affine)

    print('Split into test/training data')
    test_fixed, test_moving, test_dvf, train_fixed, train_moving, train_dvf = helper.split_data(
        fixed_array, moving_array, dvf_array, split_ratio=0.05)
    test_fixed_affine, test_moving_affine, test_dvf_affine, train_fixed_affine, train_moving_affine, train_dvf_affine = helper.split_data(
        fixed_affine, moving_affine, dvf_affine, split_ratio=0.05)

    print('Load models')
    print("Fixed input", test_fixed.shape)
    print("Moving input", test_moving.shape)
    model = load_model('best_model.h5')
    model.compile(optimizer='Adam',
                  loss='mean_squared_error',
                  metrics=["accuracy"])
    dvf = model.predict_generator(helper.generator([test_fixed, test_moving],
                                                   label=test_dvf,
                                                   predict=True,
                                                   batch_size=batch_size),
                                  steps=math.ceil(test_fixed.shape[0] /
                                                  batch_size),
                                  verbose=1)
    test_loss = model.evaluate_generator(
        helper.generator([test_fixed, test_moving],
                         label=test_dvf,
                         predict=True,
                         batch_size=batch_size),
        steps=math.ceil(test_fixed.shape[0] / batch_size),
        verbose=1)

    print('Save DVF')
    # Save images
    helper.write_images(test_fixed,
                        test_fixed_affine,
                        file_path='./outputs/',
                        file_prefix='fixed')
    helper.write_images(test_moving,
                        test_moving_affine,
                        file_path='./outputs/',
                        file_prefix='moving')
    helper.write_images(dvf,
                        test_fixed_affine,
                        file_path='./outputs/',
                        file_prefix='dvf')
    print("Test Loss:", test_loss)
    # Save warped
    print("Test Loss Shape:", test_loss.shape)
Example #7
def infer(batch_size=2):
    # Directory on the server containing the PET and PCT data
    image_dir = "/hepgpu3-data1/dmcsween/DataTwoWay128/fixed"
    #image_dir = "/hepgpu3-data1/dmcsween/Data128/ResampleData/PlanningCT"
    inputPath = "./all_logs/both_logs100perms"
    #inputPath = './mixed_hamming_logs'
    print("Load Data")
    image_data, __image, __label = load.data_reader(image_dir, image_dir,
                                                    image_dir)

    image_array, image_affine = image_data.get_data()
    moving_array, moving_affine = __image.get_data()
    dvf_array, dvf_affine = __label.get_data()
    """
    list_avail_keys = help.get_moveable_keys(image_array)
    # Get hamming set
    print("Load hamming Set")
    hamming_set = pd.read_csv("hamming_set.txt", sep=",", header=None)
    print(hamming_set)
    """
    avail_keys = pd.read_csv("avail_keys_both.txt", sep=",", header=None)
    list_avail_keys = [(avail_keys.loc[i, 0], avail_keys.loc[i, 1],
                        avail_keys.loc[i, 2]) for i in range(len(avail_keys))]
    # Get hamming set
    print("Load hamming Set")
    hamming_set = pd.read_csv("mixed_hamming_set.txt", sep=",", header=None)

    hamming_set = hamming_set.loc[:99]
    # Ignore moving and dvf
    test_dataset, validation_moving, validation_dvf, trainVal_dataset, train_moving, train_dvf = helper.split_data(
        image_array, moving_array, dvf_array, split_ratio=0.05)
    print("Valid Shape:", test_dataset.shape)
    normalised_dataset = helper.normalise(test_dataset)
    print('Load models')
    scores = np.zeros((15, 20))
    blank_idx = [n for n in range(23)]
    print(blank_idx)
    K.clear_session()
    model = load_model(inputPath + '/final_model.h5')
    opt = optimizers.SGD(lr=0.01)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])
    idx_list = []
    # i is border size
    for i in range(15):
        for j in range(20):
            idx_list = [10, 10]
            print("Pre Eval:", i, j)
            myPredictGen = gen.evaluate_generator(normalised_dataset,
                                                  list_avail_keys,
                                                  hamming_set,
                                                  hamming_idx=idx_list,
                                                  batch_size=batch_size,
                                                  blank_idx=blank_idx,
                                                  border_size=i,
                                                  image_idx=[10, 10],
                                                  full_crop=False,
                                                  out_crop=True,
                                                  inner_crop=False,
                                                  N=100)
            accuracy = model.evaluate_generator(generator=myPredictGen,
                                                steps=5,
                                                verbose=1)
            print("%s: %.2f%%" % (model.metrics_names[1], accuracy[1] * 100))
            scores[i, j] = (accuracy[1] * 100)

    np.savetxt("scores_diff_border.txt", scores, delimiter=",", fmt='%1.2i')
    avg_score = np.mean(scores, axis=1)
    avg_perm = np.mean(scores, axis=0)
    error_score = np.std(scores, axis=1)
    error_perm = np.std(scores, axis=0)
    var_score = np.var(scores, axis=1)
    var_perm = np.var(scores, axis=0)
    print("Scores:", avg_score, error_score, var_score)
    print("Perms:", avg_perm, error_perm, var_perm)
    print("Done")