import torch
import torchsummary


def test_networks_unet():
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # `unet` and `image_size` come from the module under test
    model = unet(3, 1)
    model.to(device)

    assert torchsummary.summary(model, image_size)
def cremi_unet(name='unet', sample_to_isotropy=False):
    in_shape = (84, 268, 268)
    n_channels = 12

    # These values reproduce Jan's network
    initial_fmaps = 12
    fmap_increase = 5
    downsample_factors = [[1, 3, 3], [1, 3, 3], [3, 3, 3]] if sample_to_isotropy else \
        [[1, 3, 3], [1, 3, 3], [1, 3, 3]]

    raw = tf.placeholder(tf.float32, shape=in_shape)
    raw_batched = tf.reshape(raw, (
        1,
        1,
    ) + in_shape)

    unet = networks.unet(raw_batched, initial_fmaps, fmap_increase,
                         downsample_factors)

    affs_batched = networks.conv_pass(unet,
                                      kernel_size=1,
                                      num_fmaps=n_channels,
                                      num_repetitions=1,
                                      activation='sigmoid')

    output_shape_batched = affs_batched.get_shape().as_list()
    output_shape = output_shape_batched[1:]  # strip the batch dimension

    affs = tf.reshape(affs_batched, output_shape)

    gt_affs = tf.placeholder(tf.float32, shape=output_shape)

    loss_weights = tf.placeholder(tf.float32, shape=output_shape)

    loss = tf.losses.mean_squared_error(gt_affs, affs, loss_weights)
    tf.summary.scalar('loss_total', loss)

    opt = tf.train.AdamOptimizer(learning_rate=0.5e-4,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)
    optimizer = opt.minimize(loss)
    #for trainable in tf.trainable_variables():
    #    networks.tf_var_summary(trainable)
    merged = tf.summary.merge_all()

    tf.train.export_meta_graph(filename='%s.meta' % name)

    names = {
        'raw': raw.name,
        'affs': affs.name,
        'gt_affs': gt_affs.name,
        'loss_weights': loss_weights.name,
        'loss': loss.name,
        'optimizer': optimizer.name,
        'summary': merged.name
    }

    with open('net_io_names.json', 'w') as f:
        json.dump(names, f)
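
A minimal consumer sketch (assumed usage, not from the original repo): a TF1 training script can reload the exported meta graph and look tensors up by the names stored in net_io_names.json.

import json
import tensorflow as tf

with open('net_io_names.json', 'r') as f:
    names = json.load(f)

tf.train.import_meta_graph('unet.meta')  # graph exported by cremi_unet('unet')
graph = tf.get_default_graph()
raw = graph.get_tensor_by_name(names['raw'])
loss = graph.get_tensor_by_name(names['loss'])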
Example 3
def train(model_name, gpu_id):

    model_dir = '../models/' + model_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        model.compile(optimizer=Adam(lr=lr), loss=[
                      losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    for step in range(0, n_iterations):

        X = next(train_example_gen)[0]
        train_loss = model.train_on_batch(
            [X, atlas_vol], [atlas_vol, zero_flow])

        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        printLoss(step, 1, train_loss)

        if(step % model_save_iter == 0):
            model.save(model_dir + '/' + str(step) + '.h5')
Example 4
def train(model, gpu_id, lr, n_iterations, reg_param, model_save_iter,
          load_iter):

    model_dir = '/home/ys895/MAS3_Models'
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # UNET filters
    nf_enc = [16, 32, 32, 32]
    if (model == 'vm1'):
        nf_dec = [32, 32, 32, 32, 8, 8, 3]
    else:
        nf_dec = [32, 32, 32, 32, 32, 16, 16, 3]

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        if (load_iter != 0):
            model.load_weights('/home/ys895/MAS3_Models/' + str(load_iter) +
                               '.h5')

        model.compile(optimizer=Adam(lr=lr),
                      loss=[losses.cc3D(),
                            losses.gradientLoss('l2')],
                      loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    # the generator yields the data with an extra batch dimension added
    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    # Training loop: feed the generated data into the compiled model
    for step in range(1, n_iterations + 1):
        # randomly choose one of the atlases from atlas_list
        rand_num = random.randint(0, list_num - 1)
        atlas_vol = atlas_list[rand_num]

        # Training inputs: X (train_vol), atlas_vol (atlas), zero_flow
        X = next(train_example_gen)[0]
        train_loss = model.train_on_batch([atlas_vol, X], [X, zero_flow])

        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        printLoss(step, 1, train_loss)

        if (step % model_save_iter == 0):
            model.save(model_dir + '/' + str(load_iter + step) + '.h5')
def model_factory(model_name, x, dropout, is_training, weight_decay, crop,
                  num_input_bands, num_classes, crop_size):
    if model_name == 'dilated_grsl':
        logits = dilated_grsl(x, is_training, weight_decay, crop,
                              num_input_bands, num_classes)
    elif model_name == 'dilated_icpr_rate6':
        logits = dilated_icpr_rate6(x, is_training, weight_decay, crop,
                                    num_input_bands, num_classes)
    elif model_name == 'dilated_icpr_rate6_densely':
        logits = dilated_icpr_rate6_densely(x, is_training, weight_decay, crop,
                                            num_input_bands, num_classes)
    elif model_name == 'dilated_grsl_rate8':
        logits = dilated_grsl_rate8(x, is_training, weight_decay, crop,
                                    num_input_bands, num_classes)

    elif model_name == 'fcn_25_1_4x':
        logits = fcn_25_1_4x(x, dropout, is_training, crop, weight_decay,
                             num_input_bands, num_classes)
    elif model_name == 'fcn_25_2_2x':
        logits = fcn_25_2_2x(x, dropout, is_training, crop, weight_decay,
                             num_input_bands, num_classes)
    elif model_name == 'fcn_25_3_2x_icpr':
        logits = fcn_25_3_2x_icpr(x, dropout, is_training, crop, weight_decay,
                                  num_input_bands, num_classes)
    elif model_name == 'fcn_50_1_8x':
        logits = fcn_50_1_8x(x, dropout, is_training, crop, weight_decay,
                             num_input_bands, num_classes)
    elif model_name == 'fcn_50_2_4x':
        logits = fcn_50_2_4x(x, dropout, is_training, crop, weight_decay,
                             num_input_bands, num_classes)
    elif model_name == 'fcn_50_3_2x':
        logits = fcn_50_3_2x(x, dropout, is_training, crop, weight_decay,
                             num_input_bands, num_classes)

    elif model_name == 'pixelwise':
        logits = pixelwise(x, dropout, is_training, weight_decay, crop,
                           num_input_bands, num_classes)

    elif model_name == 'segnet':
        logits = segnet(x, dropout, is_training, weight_decay, crop,
                        num_input_bands, num_classes, crop_size)
    elif model_name == 'segnet_4':
        logits = segnet_4(x, dropout, is_training, weight_decay, crop,
                          num_input_bands, num_classes, crop_size)

    elif model_name == 'unet':
        logits = unet(x, dropout, is_training, weight_decay, crop,
                      num_input_bands, num_classes, crop_size)

    elif model_name == 'deeplabv3+':
        logits = deeplab(x, dropout, is_training, weight_decay, crop,
                         num_input_bands, num_classes, crop_size)
    else:
        raise NotImplementedError('Network not identified: ' + model_name)
    return logits
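
A hypothetical invocation of model_factory (placeholder shapes and values; the individual network builders are defined elsewhere in the repo):

x = tf.placeholder(tf.float32, shape=(None, 64, 64, 3))
is_training = tf.placeholder(tf.bool)
logits = model_factory('unet', x, dropout=0.5, is_training=is_training,
                       weight_decay=5e-4, crop=None, num_input_bands=3,
                       num_classes=2, crop_size=64)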
    def _network_graph_def(self, verbose=False):
        """
        defines the network graph
        :param verbose: if True prints messages as it defines layers
        :return:
        """
        # placeholders for input and ground truth
        with tf.name_scope('input'):
            self.x = tf.placeholder(name='x',
                                    shape=(None, None, None, 1),
                                    dtype=tf.float32)
            self.y = tf.placeholder(name='y',
                                    shape=(None, None, None, 1),
                                    dtype=tf.float32)
        # by default we use batch norm in train mode (i.e. with current batch statistics)
        if self.name == 'UNET':
            self.sigmoided_logits, self.logits, _, _ = unet(
                self.x,
                1,
                self.num_layers,
                self.feature_maps_root,
                self.norm,
                True,
                verbose=verbose)
        elif self.name == 'iUNET':
            self.sigmoided_logits, self.logits, _, _ = iunet(
                self.x,
                1,
                self.num_layers,
                self.feature_maps_root,
                self.norm,
                True,
                self.n_iterations,
                verbose=verbose)
        elif self.name == 'SHN':
            self.sigmoided_logits, self.logits, _, _ = shn(
                self.x,
                1,
                self.num_layers,
                self.feature_maps_root,
                self.norm,
                True,
                self.n_modules,
                verbose=verbose)

        self.output = (self.sigmoided_logits
                       if self.name == 'UNET' else self.sigmoided_logits[-1])
        self.variables = [
            v for v in tf.global_variables() if self.name in v.name
        ]
def test(model_name, iter_num, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3]):
	"""
	test

	nf_enc and nf_dec
	#nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """  

	gpu = '/gpu:' + str(gpu_id)

	# Anatomical labels we want to evaluate
	labels = sio.loadmat('../data/labels.mat')['labels'][0]

	atlas = np.load('../data/atlas_norm.npz')
	atlas_vol = atlas['vol']
	atlas_seg = atlas['seg']
	atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True
	config.allow_soft_placement = True
	set_session(tf.Session(config=config))

	# load weights of model
	with tf.device(gpu):
		net = networks.unet(vol_size, nf_enc, nf_dec)
		net.load_weights('../models/' + model_name +
                         '/' + str(iter_num) + '.h5')

	xx = np.arange(vol_size[1])  # 192
	yy = np.arange(vol_size[0])  # 160
	zz = np.arange(vol_size[2])  # 224
	grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)  # (160,192,224,3): the original coordinates of every voxel

	X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

	with tf.device(gpu):
		pred = net.predict([X_vol, atlas_vol])

	# Warp segments with flow
	flow = pred[1][0, :, :, :, :]  # (160,192,224,3)
	sample = flow + grid  # add the flow displacement to the original positions; shape (160,192,224,3)
	sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
	warp_seg = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], sample, method='nearest', bounds_error=False, fill_value=0)

	vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
	print(np.mean(vals), np.std(vals))
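
The warp pattern above (identity grid plus dense flow, then nearest-neighbour resampling) can be checked in isolation; a self-contained sketch with tiny assumed shapes:

import numpy as np
from scipy.interpolate import interpn

vol = (4, 5, 6)                                   # tiny stand-in for (160, 192, 224)
seg = np.random.randint(0, 3, vol).astype(float)  # fake segmentation labels
xx, yy, zz = np.arange(vol[1]), np.arange(vol[0]), np.arange(vol[2])
grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)  # (4, 5, 6, 3)

flow = np.zeros(vol + (3,))                       # zero displacement = identity warp
sample = flow + grid
# interpn expects point coordinates ordered (y, x, z), so swap channels 0 and 1
sample = np.stack((sample[..., 1], sample[..., 0], sample[..., 2]), 3)
warped = interpn((yy, xx, zz), seg, sample, method='nearest',
                 bounds_error=False, fill_value=0)
assert (warped == seg).all()                      # identity flow reproduces the input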
Example 8
def test(model_name, iter_num, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3]):
	"""
	test

	nf_enc and nf_dec
	#nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """  

	gpu = '/gpu:' + str(gpu_id)

	# Anatomical labels we want to evaluate
	labels = sio.loadmat('../data/labels.mat')['labels'][0]

	atlas = np.load('../data/atlas_norm.npz')
	atlas_vol = atlas['vol']
	atlas_seg = atlas['seg']
	atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True
	config.allow_soft_placement = True
	set_session(tf.Session(config=config))

	# load weights of model
	with tf.device(gpu):
		net = networks.unet(vol_size, nf_enc, nf_dec)
		net.load_weights('../models/' + model_name +
                         '/' + str(iter_num) + '.h5')

	xx = np.arange(vol_size[1])
	yy = np.arange(vol_size[0])
	zz = np.arange(vol_size[2])
	grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

	X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

	with tf.device(gpu):
		pred = net.predict([X_vol, atlas_vol])

	# Warp segments with flow
	flow = pred[1][0, :, :, :, :]
	sample = flow+grid
	sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
	warp_seg = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], sample, method='nearest', bounds_error=False, fill_value=0)

	vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
	print(np.mean(vals), np.std(vals))
Example 9
def train(model,save_name, gpu_id, lr, n_iterations, reg_param, model_save_iter):

    model_dir = '../models/' + save_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))


    # UNET filters
    nf_enc = [16,32,32,32]
    if(model == 'vm1'):
        nf_dec = [32,32,32,32,8,8,3]
    else:
        nf_dec = [32,32,32,32,32,16,16,3]

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        model.compile(optimizer=Adam(lr=lr), loss=[losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    zeroflow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    
    for step in range(0, n_iterations):

        sub = np.load(train_pairs[step % (noftrain ** 2)][0])
        sub = np.reshape(sub, (1,) + sub.shape + (1,))
        tmp = np.load(train_pairs[step % (noftrain ** 2)][1])
        tmp = np.reshape(tmp, (1,) + tmp.shape + (1,))
        
        train_loss = model.train_on_batch([sub, tmp], [tmp, zeroflow])

        printLoss(step, train_loss, keras.get_value(model.optimizer.lr))

        if(step % model_save_iter == 0):
            model.save(model_dir + '/' + str(step) + '.h5')
        if(step % (2*(noftrain ** 2)) == 0 and step > 0):           
            keras.set_value(model.optimizer.lr, keras.get_value(model.optimizer.lr) / 2)
Example 10
def train(model,save_name, gpu_id, lr, n_iterations, reg_param, model_save_iter):

    model_dir = '../models/' + save_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))


    # UNET filters
    nf_enc = [16,32,32,32]
    if(model == 'vm1'):
        nf_dec = [32,32,32,32,8,8,3]
    else:
        nf_dec = [32,32,32,32,32,16,16,3]

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        model.compile(optimizer=Adam(lr=lr), loss=[
                      losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    for step in range(0, n_iterations):

        X = next(train_example_gen)[0]
        train_loss = model.train_on_batch(
            [X, atlas_vol], [atlas_vol, zero_flow])

        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        printLoss(step, 1, train_loss)

        if(step % model_save_iter == 0):
            model.save(model_dir + '/' + str(step) + '.h5')
Example 11
def train(model,
          model_dir,
          gpu_id,
          lr,
          n_iterations,
          reg_param,
          model_save_iter,
          batch_size=1):
    """
    model training function
    :param model: either vm1 or vm2 (based on CVPR 2018 paper)
    :param model_dir: the model directory to save to
    :param gpu_id: integer specifying the gpu to use
    :param lr: learning rate
    :param n_iterations: number of training iterations
    :param reg_param: the smoothness/reconstruction tradeoff parameter (lambda in CVPR paper)
    :param model_save_iter: frequency with which to save models
    :param batch_size: Optional, default of 1. can be larger, depends on GPU memory and volume size
    """

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # GPU handling
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # UNET filters for voxelmorph-1 and voxelmorph-2,
    # these are architectures presented in CVPR 2018
    nf_enc = [16, 32, 32, 32]
    if model == 'vm1':
        nf_dec = [32, 32, 32, 32, 8, 8]
    else:
        nf_dec = [32, 32, 32, 32, 32, 16, 16]

    # prepare the model
    # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, flow]
    # in the experiments, we use image_2 as atlas
    model = networks.unet(vol_size, nf_enc, nf_dec)
    model.compile(optimizer=Adam(lr=lr),
                  loss=[losses.cc3D(),
                        losses.gradientLoss('l2')],
                  loss_weights=[1.0, reg_param])

    # if you'd like to initialize the data, you can do it here:
    # model.load_weights(os.path.join(model_dir, '120000.h5'))

    # prepare data for training
    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros([batch_size, *vol_size, 3])

    # Train. Note: we use train_on_batch and design our own print function, as this has enabled
    # faster development and debugging; one could also use fit_generator and Keras callbacks.
    for step in range(0, n_iterations):

        # get data
        X = next(train_example_gen)[0]

        # train
        train_loss = model.train_on_batch([X, atlas_vol],
                                          [atlas_vol, zero_flow])
        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        # print the loss.
        print_loss(step, 1, train_loss)

        # save model
        if step % model_save_iter == 0:
            model.save(os.path.join(model_dir, str(step) + '.h5'))
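
A hypothetical invocation (argument values are illustrative only; vol_size, atlas_vol and train_vol_names are module-level globals in the original script):

train(model='vm2', model_dir='../models/vm2_run', gpu_id=0, lr=1e-4,
      n_iterations=150000, reg_param=1.0, model_save_iter=5000)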
Example 12
if __name__ == "__main__":
    print(networks.__file__)
    raw = tf.placeholder(tf.float32, shape=(43, 430, 430))
    raw_batched = tf.reshape(raw, (
        1,
        1,
    ) + (43, 430, 430))

    #unet = networks.unet(raw_batched, 12, 6, [[1, 3, 3], [1, 3, 3], [3, 3, 3]], anisotropy=10)
    unet, fov, anisotropy = networks.unet(
        raw_batched,
        12,
        6, [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [[(1, 3, 3),
          (1, 3, 3)], [(1, 3, 3),
                       (1, 3, 3)], [(3, 3, 3),
                                    (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
        [[(1, 3, 3),
          (1, 3, 3)], [(1, 3, 3),
                       (1, 3, 3)], [(3, 3, 3),
                                    (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
        anisotropy=[10, 1, 1],
        fov=[10, 1, 1])
    # raw = tf.placeholder(tf.float32, shape=(132,)*3)
    # raw_batched = tf.reshape(raw, (1, 1,) + (132,)*3)
    #
    # unet = networks.unet(raw_batched, 24, 3, [[2, 2, 2], [2, 2, 2], [2, 2, 2]])

    dist_batched, fov = networks.conv_pass(unet,
                                           kernel_size=[[1, 1, 1]],
                                           num_fmaps=1,
                                           activation=None)
Example 13
def test(load_iters, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3], sample_num=10, grid_dimension=4):
    """
    Test the rigid registration by computing the Dice score between the
    atlas segmentation and the warped image's segmentation.
    :param load_iters: iteration number of the model to load
    :param gpu_id: gpu id
    :param vol_size: volume size
    :param nf_enc: encoder filter counts
    :param nf_dec: decoder filter counts
    :param sample_num: sample grid's dimension; can be changed to improve performance
    :param grid_dimension: dimension of R (in the formula)
    :return: None
    """
    gpu = '/gpu:' + str(gpu_id)

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('../rigid_model/' + load_iters + '.h5', by_name=True)

    X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

    orig_vol = X_vol

    theta = 0
    beta = 4
    omega = 0
    X_seg = rotate_img(X_seg[0, :, :, :, 0], theta=theta, beta=beta, omega=omega)
    X_vol = rotate_img(X_vol[0, :, :, :, 0], theta=theta, beta=beta, omega=omega)
    X_seg = X_seg.reshape((1,) + X_seg.shape + (1,))
    X_vol = X_vol.reshape((1,) + X_vol.shape + (1,))

    with tf.device(gpu):
        pred = net.predict([X_vol, atlas_vol])

    # get flow
    flow = pred[1][0, :, :, :, :]

    # sample coordinates (sample_num * sample_num * sample_num)
    x = np.linspace(0, (vol_size[0]/sample_num)*(sample_num-1), sample_num)
    x = x.astype(np.int32)
    y = np.linspace(0, (vol_size[1]/sample_num)*(sample_num-1), sample_num)
    y = y.astype(np.int32)
    z = np.linspace(0, (vol_size[2]/sample_num)*(sample_num-1), sample_num)
    z = z.astype(np.int32)
    index = np.rollaxis(np.array(np.meshgrid(y, x, z)), 0, 4)
    x = index[:, :, :, 1]
    y = index[:, :, :, 0]
    z = index[:, :, :, 2]

    # Y in formula
    x_flow = np.arange(vol_size[0])
    y_flow = np.arange(vol_size[1])
    z_flow = np.arange(vol_size[2])
    grid = np.rollaxis(np.array((np.meshgrid(y_flow, x_flow, z_flow))), 0, 4)  # original coordinates
    grid_x = grid_sample(x, y, z, grid[:, :, :, 1], sample_num)
    grid_y = grid_sample(x, y, z, grid[:, :, :, 0], sample_num)
    grid_z = grid_sample(x, y, z, grid[:, :, :, 2], sample_num)#X (10,10,10)

    sample = flow + grid
    sample_x = grid_sample(x, y, z, sample[:, :, :, 1], sample_num)
    sample_y = grid_sample(x, y, z, sample[:, :, :, 0], sample_num)
    sample_z = grid_sample(x, y, z, sample[:, :, :, 2], sample_num)#Y (10,10,10)

    sum_x = np.sum(flow[:, :, :, 1])
    sum_y = np.sum(flow[:, :, :, 0])
    sum_z = np.sum(flow[:, :, :, 2])

    ave_x = sum_x/(vol_size[0] * vol_size[1] * vol_size[2])
    ave_y = sum_y/(vol_size[0] * vol_size[1] * vol_size[2])
    ave_z = sum_z/(vol_size[0] * vol_size[1] * vol_size[2])

    # formula
    Y = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
    X = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
    T = np.array([ave_x, ave_y, ave_z, 1])  # shape (4,)
    #R = np.zeros((10, 10, 10, grid_dimension, grid_dimension))

    for i in np.arange(sample_num):
        for j in np.arange(sample_num):
            for z in np.arange(sample_num):
                Y[i, j, z, :] = np.array([sample_x[i,j,z], sample_y[i,j,z], sample_z[i,j,z], 1])
                Y[i, j, z, :] = Y[i, j, z, :] - T  # center: Y' = Y - T

    for i in np.arange(sample_num):
        for j in np.arange(sample_num):
            for z in np.arange(sample_num):
                X[i, j, z, :] = np.array([grid_x[i, j, z], grid_y[i, j, z], grid_z[i, j, z], 1])

    X = X.reshape((sample_num * sample_num * sample_num, grid_dimension))
    Y = Y.reshape((sample_num * sample_num * sample_num, grid_dimension))
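    # Least-squares fit of the transform: R minimizes ||X R - Y||^2 and is
    # computed in closed form from the normal equations, R = (X^T X)^+ X^T Y
    # (equivalently: R, *_ = np.linalg.lstsq(X, Y, rcond=None))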
    R = np.dot(np.dot(np.linalg.pinv(np.dot(np.transpose(X), X)), np.transpose(X)), Y)# R
    print(R)
    # build new grid(Use R to do the spatial transform)
    shifted_x = np.arange(vol_size[0])
    shifted_y = np.arange(vol_size[1])
    shifted_z = np.arange(vol_size[2])
    shifted_grid = np.rollaxis(np.array((np.meshgrid(shifted_y, shifted_x, shifted_z))), 0, 4)

    for i in np.arange(vol_size[0]):
        for j in np.arange(vol_size[1]):
            for z in np.arange(vol_size[2]):
                coordinates = np.dot(R, np.array([i, j, z, 1]).reshape(4, 1)) + T.reshape(4, 1)
                #print("voxel." + '(' + str(i) + ',' + str(j) + ',' + str(z) + ')')
                shifted_grid[i, j, z, 1] = coordinates[0]
                shifted_grid[i, j, z, 0] = coordinates[1]
                shifted_grid[i, j, z, 2] = coordinates[2]

    # interpolation
    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    shifted_grid = np.stack((shifted_grid[:, :, :, 1], shifted_grid[:, :, :, 0], shifted_grid[:, :, :, 2]), 3)  # shifted_grid has x and y swapped, so swap them back here
    warp_seg = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], shifted_grid, method='nearest', bounds_error=False, fill_value=0)# rigid registration
    warp_vol = interpn((yy, xx, zz), X_vol[0, :, :, :, 0], shifted_grid, method='nearest', bounds_error=False, fill_value=0)# rigid registration

    # CVPR
    #grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)
    #sample = flow + grid
    #sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
    #warp_seg2 = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], sample, method='nearest', bounds_error=False, fill_value=0)# deformable registration

    # compute dice
    vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
    #vals2, _ = dice(X_seg[0, :, :, :, 0], atlas_seg, labels=labels, nargout=2)
    #vals3, _ = dice(warp_seg2, atlas_seg, labels=labels, nargout=2)
    #print("dice before:")
    #print(np.mean(vals2), np.std(vals2))
    #print("dice after deformable registration:")
    #print(np.mean(vals3), np.std(vals3))
    print("dice after rigid registration:")
    print(np.mean(vals), np.std(vals))

    # plot
    #fig1, axs1 = nplt.slices(warp_seg[100, :, :], do_colorbars=True)
    #fig1.savefig('warp_seg100.png')
    #fig2, axs2 = nplt.slices(warp_seg[130, :, :], do_colorbars=True)
    #fig2.savefig('warp_seg130.png')
    #fig3, axs3 = nplt.slices(atlas_seg[100, :, :], do_colorbars=True)
    #fig3.savefig('atlas_seg100.png')
    #fig4, axs4 = nplt.slices(atlas_seg[130, :, :], do_colorbars=True)
    #fig4.savefig('atlas_seg130.png')

    # specify slice
    num_slice = 90

    plt.figure()
    plt.subplot(1, 3, 1)
    plt.imshow(orig_vol[0, :, num_slice, :, 0])
    plt.subplot(1, 3, 2)
    plt.imshow(X_vol[0, :, num_slice, :, 0])
    plt.subplot(1, 3, 3)
    plt.imshow(warp_vol[:, num_slice, :])
    plt.savefig("slice" + str(num_slice) + '_' + str(k) + ".png")
Example 14
def test(model_name,
         iter_num,
         gpu_id,
         n_test,
         filename,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16]):
    """
    test

    nf_enc and nf_dec
    #nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """
    start_time = time.time()
    gpu = '/gpu:' + str(gpu_id)
    print(gpu)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1, ) + atlas_vol.shape + (1, ))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('../models/' + model_name + '/' + str(iter_num) +
                         '.h5')

    seg_path = '../models/seg_pretrained/0.h5'
    feature_model, num_features = networks.segmenter_feature_model(seg_path)

    with open('seg_feature_stats.txt', 'rb') as file:
        feature_stats = pickle.loads(
            file.read())  # use `pickle.loads` to do the reverse

    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

    percentile = 99
    dice_means = []

    results = {}

    for step in range(0, n_test):

        res = {}

        vol_name, seg_name = test_brain_strings[step].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        with tf.device(gpu):
            pred = net.predict([X_vol, atlas_vol])
            warped_image = np.transpose(pred[0][0, :, :, :, :], (2, 0, 1, 3))
            pred_ac_features = feature_model.predict([warped_image])
            orig_ac_features = feature_model.predict(
                [np.transpose(X_vol[0, :, :, :, :], (2, 0, 1, 3))])

        # Warp segments with flow
        flow = pred[1][0, :, :, :, :]
        sample = flow + grid
        sample = np.stack(
            (sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
        warp_seg = interpn((yy, xx, zz),
                           X_seg[0, :, :, :, 0],
                           sample,
                           method='nearest',
                           bounds_error=False,
                           fill_value=0)

        vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
        # print(np.mean(vals), np.std(vals))
        mean = np.mean(vals)
        std = np.std(vals)

        res['dice_mean'] = mean
        res['dice_std'] = std

        for i in range(len(pred_ac_features)):
            normalized_pred = normalize_percentile(pred_ac_features[i],
                                                   percentile,
                                                   feature_stats,
                                                   i,
                                                   twod=True)
            normalized_orig = normalize_percentile(orig_ac_features[i],
                                                   percentile,
                                                   feature_stats,
                                                   i,
                                                   twod=True)

            for j in range(normalized_pred.shape[-1]):
                pred_feature = normalized_pred[:, :, :, j]
                orig_feature = normalized_orig[:, :, :, j]
                append_to_dict(res, 'l1_diff',
                               np.mean(np.abs(pred_feature - orig_feature)))
                append_to_dict(res, 'l2_diff',
                               np.mean(np.square(pred_feature - orig_feature)))

                append_to_dict(res, 'pred_mean', np.mean(pred_feature))
                append_to_dict(res, 'pred_std', np.std(pred_feature))
                append_to_dict(res, 'pred_99pc',
                               np.percentile(pred_feature, 99))
                append_to_dict(res, 'pred_1pc', np.percentile(pred_feature, 1))

                append_to_dict(res, 'orig_mean', np.mean(orig_feature))
                append_to_dict(res, 'orig_std', np.std(orig_feature))
                append_to_dict(res, 'orig_99pc',
                               np.percentile(orig_feature, 99))
                append_to_dict(res, 'orig_1pc', np.percentile(orig_feature, 1))

        dice_means.append(mean)

        results[vol_name] = res
        print(step, mean, std)
        print('time:', time.time() - start_time)

    print('average dice:', np.mean(dice_means))
    print('time taken:', time.time() - start_time)
    # for key, value in results.items():
    #     print(key)
    #     print(value)

    with open(filename, 'wb') as file:
        file.write(
            pickle.dumps(results))  # use `pickle.loads` to do the reverse
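
Reading the results back mirrors the "use pickle.loads to do the reverse" comments above (a sketch; 'results.pkl' stands in for the filename argument):

import pickle

with open('results.pkl', 'rb') as f:
    results = pickle.loads(f.read())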
if __name__ == "__main__":
    print(networks.__file__)
    raw = tf.placeholder(tf.float32, shape=(43, 430, 430))
    raw_batched = tf.reshape(raw, (
        1,
        1,
    ) + (43, 430, 430))

    #unet = networks.unet(raw_batched, 12, 6, [[1, 3, 3], [1, 3, 3], [3, 3, 3]], anisotropy=10)
    unet, fov, voxel_size = networks.unet(
        raw_batched,
        12,
        6, [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [[(1, 3, 3),
          (1, 3, 3)], [(1, 3, 3),
                       (1, 3, 3)], [(3, 3, 3),
                                    (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
        [[(1, 3, 3),
          (1, 3, 3)], [(1, 3, 3),
                       (1, 3, 3)], [(3, 3, 3),
                                    (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1))
    # raw = tf.placeholder(tf.float32, shape=(132,)*3)
    # raw_batched = tf.reshape(raw, (1, 1,) + (132,)*3)
    #
    # unet = networks.unet(raw_batched, 24, 3, [[2, 2, 2], [2, 2, 2], [2, 2, 2]])

    dist_batched, fov = networks.conv_pass(unet,
                                           kernel_size=[[1, 1, 1]],
                                           num_fmaps=1,
                                           activation=None)
Example 16
def train(model,save_name, gpu_id, lr, n_iterations, reg_param, model_save_iter, alpha):

    model_dir = '../models/' + save_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))


    # UNET filters
    nf_enc = [16,32,32,32]
    if(model == 'vm1'):
        nf_dec = [32,32,32,32,8,8,3]
    else:
        nf_dec = [32,32,32,32,32,16,16,3]
        
    nf = [0, 8, 16, 32, 64]

    with tf.device(gpu):
        deformer = networks.unet(vol_size, nf_enc, nf_dec)
        #deformer.compile(optimizer=Adam(lr=lr), loss=[losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')
        
        discriminator = networks.similarity_net(vol_size, nf)
        discriminator.compile(optimizer=Adam(lr), loss='binary_crossentropy')
        discriminator.trainable = False
        
        # Build GAN model
        src = Input(shape=vol_size + (1,))
        tgt = Input(shape=vol_size + (1,))
        [warp, df] = deformer([src,tgt])
        #pdb.set_trace()
        sim_p = discriminator([warp, tgt])

        GAN = Model([src, tgt], [sim_p, df])
        GAN.compile(optimizer=Adam(lr), loss=['binary_crossentropy',losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        GAN.summary()
        
    sz = sim_p.shape.as_list()
    sz[0] = 1
    p_one = np.ones(sz)
    p_zero = np.zeros(sz)
    zeroflow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    for step in range(0, n_iterations):
        print('Epoch ' + str(step // n_pairs) + ', Total iterations ' + str(step) + ', lr= ' + str(keras.get_value(discriminator.optimizer.lr)) + ':')
        
        sub = np.load(train_pairs[step % (n_pairs)][0])
        sub = np.reshape(sub, (1,) + sub.shape + (1,))
        tmp = np.load(train_pairs[step % (n_pairs)][1])
        tmp = np.reshape(tmp, (1,) + tmp.shape + (1,))
        
        ref_sub = np.load(ref_pairs[step % (n_ref)][0])
        ref_sub = np.reshape(ref_sub, (1,) + ref_sub.shape + (1,))
        ref_tmp = np.load(ref_pairs[step % (n_ref)][1])
        ref_tmp = np.reshape(ref_tmp, (1,) + ref_tmp.shape + (1,))
        
        
        ## ----------------  Train deformer --------------------------------##
        keras.set_value(GAN.optimizer.lr, lr)
        g_loss = GAN.train_on_batch([sub, tmp], [p_one, zeroflow])
        print('  Train deformer: ' + str(g_loss[1]))
        print('  Regularization: ' + str(g_loss[2]))
            
        
        ## ----------------  Train discriminator for reference --------------##
        fused = alpha * sub + (1-alpha) * tmp
        d_loss1 = discriminator.test_on_batch([fused, tmp], p_one)
        ## -------------------- Tricks for balancing the deformer and the discriminator --------------##
        if d_loss1 > 0.6:
            keras.set_value(discriminator.optimizer.lr, lr)
        elif d_loss1 > 0.4:
            keras.set_value(discriminator.optimizer.lr, lr * 0.1)
        elif d_loss1 > 0.2:
            keras.set_value(discriminator.optimizer.lr, lr * 0.01)
        else:
            keras.set_value(discriminator.optimizer.lr, lr * 0)
        d_loss1 = discriminator.train_on_batch([fused, tmp], p_one)
        print '  Test discriminator for positive sample: ' + str(d_loss1)    
            
            
        ## ----------------  Train discriminator for deformer --------------##
        [warped, deform] = deformer.predict([sub, tmp])    
        d_loss0 = discriminator.test_on_batch([warped, tmp], p_zero)
        if d_loss0 > 0.6:
            keras.set_value(discriminator.optimizer.lr, lr)
        elif d_loss0 > 0.4:
            keras.set_value(discriminator.optimizer.lr, lr * 0.1)
        elif d_loss0 > 0.2:
            keras.set_value(discriminator.optimizer.lr, lr * 0.01)
        else:
            keras.set_value(discriminator.optimizer.lr, lr * 0)
        d_loss0 = discriminator.train_on_batch([warped, tmp], p_zero)
        print('  Test discriminator for negative sample: ' + str(d_loss0))

            
        if(step % model_save_iter == 0):
            deformer.save(model_dir + '/' + str(step) + '.h5')
            
        #if(step % (20 * n_pairs) == 0 and step > 0):           
            #lr = lr / 2
            #alpha = np.abs(alpha - 0.05)

        sys.stdout.flush()
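
The learning-rate balancing heuristic above, factored into a small helper for clarity (a sketch; thresholds copied from the training loop):

def balanced_lr(base_lr, d_loss):
    # scale the discriminator's learning rate down as its loss shrinks, so a
    # discriminator that is already winning trains more slowly
    if d_loss > 0.6:
        return base_lr
    if d_loss > 0.4:
        return base_lr * 0.1
    if d_loss > 0.2:
        return base_lr * 0.01
    return 0.0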
Example 17
import networks
import tensorflow as tf
import json

if __name__ == "__main__":

    raw = tf.placeholder(tf.float32, shape=(196, ) * 3)
    raw_batched = tf.reshape(raw, (
        1,
        1,
    ) + (196, ) * 3)

    unet = networks.unet(raw_batched, 12, 6, [[2, 2, 2], [2, 2, 2], [3, 3, 3]])

    affs_batched = networks.conv_pass(unet,
                                      kernel_size=1,
                                      num_fmaps=3,
                                      num_repetitions=1,
                                      activation='sigmoid')

    output_shape_batched = affs_batched.get_shape().as_list()
    output_shape = output_shape_batched[1:]  # strip the batch dimension

    affs = tf.reshape(affs_batched, output_shape)

    gt_affs = tf.placeholder(tf.float32, shape=output_shape)

    loss_weights = tf.placeholder(tf.float32, shape=output_shape)

    loss = tf.losses.mean_squared_error(gt_affs, affs, loss_weights)
    tf.summary.scalar('loss_total', loss)
def test(iter_num, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3]):
 gpu = '/gpu:' + str(gpu_id)

 # Anatomical labels we want to evaluate
 labels = sio.loadmat('../data/labels.mat')['labels'][0]

 # read atlas
 atlas_vol1, atlas_seg1 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990114_vc722.npz',
                                                              '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990114_vc722.npz')# [1,160,192,224,1]
 atlas_seg1 = atlas_seg1[0, :, :, :, 0]  # reduce the dimension to [160,192,224]

 atlas_vol2, atlas_seg2 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990210_vc792.npz',
                                                              '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990210_vc792.npz')
 atlas_seg2 = atlas_seg2[0, :, :, :, 0]

 #gpu = '/gpu:' + str(gpu_id)
 os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
 config = tf.ConfigProto()
 config.gpu_options.allow_growth = True
 config.allow_soft_placement = True
 set_session(tf.Session(config=config))

 # load weights of model
 with tf.device(gpu):
    net = networks.unet(vol_size, nf_enc, nf_dec)
    net.load_weights('/home/ys895/MAS2_Models/'+str(iter_num)+'.h5')
    #net.load_weights('../models/' + model_name + '/' + str(iter_num) + '.h5')

 xx = np.arange(vol_size[1])
 yy = np.arange(vol_size[0])
 zz = np.arange(vol_size[2])
 grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4) # (160, 192, 224, 3)
 #X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')
 X_vol1, X_seg1 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/981216_vc681.npz',
                                                      '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/981216_vc681.npz')

 X_vol2, X_seg2 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990205_vc783.npz',
                                                      '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990205_vc783.npz')

 X_vol3, X_seg3 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990525_vc1024.npz',
                                                     '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990525_vc1024.npz')

 X_vol4, X_seg4 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991025_vc1379.npz',
                                                      '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991025_vc1379.npz')

 X_vol5, X_seg5 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991122_vc1463.npz',
                                                     '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991122_vc1463.npz')

 # registration direction: the atlas is warped toward each target volume
 # pred[0].shape (1, 160, 192, 224, 1)
 # pred[1].shape (1, 160, 192, 224, 3)
 # X1
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol1])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1 + grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[:, :, :], sample1, method='nearest', bounds_error=False, fill_value=0)  # (160, 192, 224)



 # label fusion: with a single atlas there is nothing to fuse, so the
 # warped atlas segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg1[0, :, :, :, 0], labels=labels, nargout=2)
 mean1 = np.mean(vals)

 # X2
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol2])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1 + grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[:, :, :], sample1, method='nearest', bounds_error=False, fill_value=0)  # (160, 192, 224)



 # label fusion: with a single atlas there is nothing to fuse, so the
 # warped atlas segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg2[0,:,:,:,0], labels=labels, nargout=2)
 mean2 = np.mean(vals)
 #print(np.mean(vals), np.std(vals))

# X3
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol3])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1 + grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[:, :, :], sample1, method='nearest', bounds_error=False, fill_value=0)  # (160, 192, 224)



 # label fusion: with a single atlas there is nothing to fuse, so the
 # warped atlas segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg3[0, :, :, :, 0], labels=labels, nargout=2)
 mean3 = np.mean(vals)

# X4
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol4])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1 + grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[:, :, :], sample1, method='nearest', bounds_error=False, fill_value=0)  # (160, 192, 224)


 # label fusion: with a single atlas there is nothing to fuse, so the
 # warped atlas segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg4[0, :, :, :, 0], labels=labels, nargout=2)
 mean4 = np.mean(vals)


# X5
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol5])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1 + grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[:, :, :], sample1, method='nearest', bounds_error=False, fill_value=0)  # (160, 192, 224)


 # label fusion: with a single atlas there is nothing to fuse, so the
 # warped atlas segmentation is used directly; a multi-atlas version
 # would vote per voxel, e.g. warp_seg[x, y, z] = stats.mode(warp_arr)[0]
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg5[0, :, :, :, 0], labels=labels, nargout=2)
 mean5 = np.mean(vals)

 # compute mean of dice score
 total = mean1 + mean2 + mean3 + mean4 + mean5
 mean_dice = total / 5
 print(mean_dice)
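
The per-voxel voting hinted at in the label-fusion comments above can be written as a small pure-numpy helper (a sketch for the multi-atlas case):

import numpy as np

def majority_vote(segs):
    # segs: list of integer label volumes with identical shapes
    stacked = np.stack(segs, axis=-1)  # (..., n_atlases)
    labels = np.unique(stacked)
    counts = np.stack([(stacked == lab).sum(axis=-1) for lab in labels], axis=-1)
    return labels[np.argmax(counts, axis=-1)]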
Example 19
def test(iter_num,
         gpu_id,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16, 3]):
    gpu = '/gpu:' + str(gpu_id)

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    # read atlas
    atlas_vol1, atlas_seg1 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990114_vc722.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990114_vc722.npz'
    )  # [1,160,192,224,1]
    atlas_seg1 = atlas_seg1[0, :, :, :, 0]  # reduce the dimension to [160,192,224]
    atlas_seg1 = keras.utils.to_categorical(atlas_seg1)
    atlas_vol2, atlas_seg2 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990210_vc792.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990210_vc792.npz'
    )
    atlas_seg2 = atlas_seg2[0, :, :, :, 0]
    atlas_seg2 = keras.utils.to_categorical(atlas_seg2)
    atlas_vol3, atlas_seg3 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990405_vc922.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990405_vc922.npz'
    )
    atlas_seg3 = atlas_seg3[0, :, :, :, 0]
    atlas_seg3 = keras.utils.to_categorical(atlas_seg3)
    atlas_vol4, atlas_seg4 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991006_vc1337.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991006_vc1337.npz'
    )
    atlas_seg4 = atlas_seg4[0, :, :, :, 0]
    atlas_seg4 = keras.utils.to_categorical(atlas_seg4)
    atlas_vol5, atlas_seg5 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991120_vc1456.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991120_vc1456.npz'
    )
    atlas_seg5 = atlas_seg5[0, :, :, :, 0]
    atlas_seg5 = keras.utils.to_categorical(atlas_seg5)
    #atlas = np.load('../data/atlas_norm.npz')
    #atlas_vol = atlas['vol']
    #print('the size of atlas:')
    #print(atlas_vol.shape)
    #atlas_seg = atlas['seg']
    #atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

    #gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('/home/ys895/MAS_Models/' + str(iter_num) + '.h5')
        #net.load_weights('../models/' + model_name + '/' + str(iter_num) + '.h5')

    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0,
                       4)  # (160, 192, 224, 3)
    #X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')
    X_vol1, X_seg1 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/981216_vc681.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/981216_vc681.npz'
    )

    X_vol2, X_seg2 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990205_vc783.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990205_vc783.npz'
    )

    X_vol3, X_seg3 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990525_vc1024.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990525_vc1024.npz'
    )

    X_vol4, X_seg4 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991025_vc1379.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991025_vc1379.npz'
    )

    X_vol5, X_seg5 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991122_vc1463.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991122_vc1463.npz'
    )

    # registration direction: each atlas is warped toward the target volume
    # pred[0].shape (1, 160, 192, 224, 1)
    # pred[1].shape (1, 160, 192, 224, 3)
    # X1
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol1])
        pred2 = net.predict([atlas_vol2, X_vol1])
        pred3 = net.predict([atlas_vol3, X_vol1])
        pred4 = net.predict([atlas_vol4, X_vol1])
        pred5 = net.predict([atlas_vol5, X_vol1])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224, n_labels)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the one-hot warped segmentations, then take the argmax
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg1[0, :, :, :, 0], labels=labels, nargout=2)
    mean1 = np.mean(vals)
    var1 = np.std(vals)

    # X2
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol2])
        pred2 = net.predict([atlas_vol2, X_vol2])
        pred3 = net.predict([atlas_vol3, X_vol2])
        pred4 = net.predict([atlas_vol4, X_vol2])
        pred5 = net.predict([atlas_vol5, X_vol2])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224, n_labels)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the five warped per-label probability maps,
    # then argmax over the label axis to get the final warp_seg
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg2[0, :, :, :, 0], labels=labels, nargout=2)
    mean2 = np.mean(vals)
    var2 = np.std(vals)
    #print(np.mean(vals), np.std(vals))

    # X3
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol3])
        pred2 = net.predict([atlas_vol2, X_vol3])
        pred3 = net.predict([atlas_vol3, X_vol3])
        pred4 = net.predict([atlas_vol4, X_vol3])
        pred5 = net.predict([atlas_vol5, X_vol3])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the five warped per-label probability maps,
    # then argmax over the label axis to get the final warp_seg
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg3[0, :, :, :, 0], labels=labels, nargout=2)
    mean3 = np.mean(vals)
    var3 = np.std(vals)

    # X4
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol4])
        pred2 = net.predict([atlas_vol2, X_vol4])
        pred3 = net.predict([atlas_vol3, X_vol4])
        pred4 = net.predict([atlas_vol4, X_vol4])
        pred5 = net.predict([atlas_vol5, X_vol4])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the five warped per-label probability maps,
    # then argmax over the label axis to get the final warp_seg
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg4[0, :, :, :, 0], labels=labels, nargout=2)
    mean4 = np.mean(vals)
    var4 = np.std(vals)
    # X5
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol5])
        pred2 = net.predict([atlas_vol2, X_vol5])
        pred3 = net.predict([atlas_vol3, X_vol5])
        pred4 = net.predict([atlas_vol4, X_vol5])
        pred5 = net.predict([atlas_vol5, X_vol5])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (1, 160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the five warped per-label probability maps,
    # then argmax over the label axis to get the final warp_seg
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg5[0, :, :, :, 0], labels=labels, nargout=2)
    mean5 = np.mean(vals)
    var5 = np.std(vals)

    # average the per-subject Dice means and standard deviations
    mean_dice = (mean1 + mean2 + mean3 + mean4 + mean5) / 5
    mean_std = (var1 + var2 + var3 + var4 + var5) / 5
    print(str(mean_dice) + ',' + str(mean_std))
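
The five per-subject blocks above differ only in the test volume and its ground-truth segmentation. A minimal sketch of how the repetition could be factored out (the helper name `evaluate_subject` and the list arguments are hypothetical; it assumes the same `net`, `grid`, `yy`/`xx`/`zz`, `labels`, `interpn`, and `dice` as above):

def evaluate_subject(net, atlas_vols, atlas_segs, X_vol, X_seg,
                     grid, yy, xx, zz, labels):
    # Register each atlas to the subject and warp its per-label segmentation.
    warped = []
    for atlas_vol, atlas_seg in zip(atlas_vols, atlas_segs):
        pred = net.predict([atlas_vol, X_vol])
        flow = pred[1][0, :, :, :, :]
        sample = flow + grid
        # interpn indexes as (y, x, z), so swap the first two flow channels
        sample = np.stack((sample[:, :, :, 1],
                           sample[:, :, :, 0],
                           sample[:, :, :, 2]), 3)
        warped.append(interpn((yy, xx, zz), atlas_seg, sample,
                              method='linear', bounds_error=False,
                              fill_value=0))

    # Label fusion: average the warped probability maps, then argmax.
    warp_seg = np.argmax(np.mean(warped, axis=0), axis=3)
    vals, _ = dice(warp_seg, X_seg[0, :, :, :, 0], labels=labels, nargout=2)
    return np.mean(vals), np.std(vals)

Each meanN/varN pair above would then be one call, e.g. `mean1, var1 = evaluate_subject(net, atlas_vols, atlas_segs, X_vol1, X_seg1, grid, yy, xx, zz, labels)`.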
Example 20
def train(model,
          pretrained_path,
          model_name,
          gpu_id,
          lr,
          n_iterations,
          autoencoder_iters,
          autoencoder_model,
          autoencoder_num_downsample,
          ac_coef,
          use_normalize,
          norm_percentile,
          use_mse,
          reg_param,
          gamma,
          model_save_iter,
          batch_size=1):
    """
    model training function
    :param model: either vm1 or vm2 (based on CVPR 2018 paper)
    :param model_dir: the model directory to save to
    :param gpu_id: integer specifying the gpu to use
    :param lr: learning rate
    :param n_iterations: number of training iterations
    :param reg_param: the smoothness/reconstruction tradeoff parameter (lambda in CVPR paper)
    :param model_save_iter: frequency with which to save models
    :param batch_size: Optional, default of 1. can be larger, depends on GPU memory and volume size
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    restrict_GPU_tf(str(gpu_id))
    restrict_GPU_keras(str(gpu_id))

    train_labels = sio.loadmat('../data/labels.mat')['labels'][0]
    n_labels = train_labels.shape[0]

    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1, ) + atlas_vol.shape + (1, ))
    atlas_seg = np.reshape(atlas_seg, (1, ) + atlas_seg.shape + (1, ))

    atlas_seg = datagenerators.split_seg_into_channels(atlas_seg, train_labels)
    atlas_seg = datagenerators.downsample(atlas_seg)

    model_dir = "../models/" + model_name
    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # GPU handling
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # UNET filters for voxelmorph-1 and voxelmorph-2,
    # these are architectures presented in CVPR 2018
    nf_enc = [16, 32, 32, 32]
    if model == 'vm1':
        nf_dec = [32, 32, 32, 32, 8, 8]
    else:
        nf_dec = [32, 32, 32, 32, 32, 16, 16]

    # prepare the model
    # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, flow]
    # in the experiments, we use image_2 as atlas

    autoencoder_path = '../models/%s/%s.h5' % (autoencoder_model,
                                               autoencoder_iters)

    mse_coef = 1.0 if use_mse else 0
    print('mse coef', mse_coef)
    model = networks.unet(vol_size,
                          nf_enc,
                          nf_dec,
                          use_seg=True,
                          n_seg=len(train_labels))
    model.compile(optimizer=Adam(lr=lr),
                  loss=['mse',
                        losses.gradientLoss('l2'), losses.diceLoss],
                  loss_weights=[mse_coef, reg_param, gamma])

    # optionally initialize the model from pretrained weights:
    if pretrained_path is not None:
        model.load_weights(pretrained_path)

    # prepare data for training
    train_example_gen = datagenerators.example_gen(train_vol_names,
                                                   return_segs=True,
                                                   seg_dir=train_seg_dir)
    zero_flow = np.zeros([batch_size, *vol_size, 3])

    # train. Note: we use train_on_batch and design our own print function, as this has enabled
    # faster development and debugging, but one could also use fit_generator and Keras callbacks.
    for step in range(0, n_iterations):

        # get data
        X = next(train_example_gen)
        X_seg = X[1]

        X_seg = datagenerators.split_seg_into_channels(X_seg, train_labels)
        X_seg = datagenerators.downsample(X_seg)

        # train
        train_loss = model.train_on_batch([X[0], atlas_vol, X_seg],
                                          [atlas_vol, zero_flow, atlas_seg])
        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        # print the loss.
        print_loss(step, 1, train_loss)

        # save model
        if step % model_save_iter == 0:
            model.save(os.path.join(model_dir, str(step) + '.h5'))
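
A hypothetical call to the function above; every argument value here is an assumption for illustration, not taken from the source:

# Illustrative only: all values below are assumptions.
train(model='vm2',
      pretrained_path=None,
      model_name='vm2_seg_example',
      gpu_id=0,
      lr=1e-4,
      n_iterations=150000,
      autoencoder_iters=50000,
      autoencoder_model='autoencoder_example',
      autoencoder_num_downsample=3,
      ac_coef=1.0,
      use_normalize=False,
      norm_percentile=99,
      use_mse=True,
      reg_param=1.0,
      gamma=0.5,
      model_save_iter=5000)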
Example 21
validation_dataset = ImageDataset(input_dir=validation_directory,
                                  transform=True)  #Validation Dataset

num_epochs = n_iters / (len(train_dataset) / batch_size)
num_epochs = int(num_epochs)

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

validation_loader = torch.utils.data.DataLoader(
    dataset=validation_dataset,
    batch_size=validation_batch_size,
    shuffle=False)

model = unet()
iteri = 0
iter_new = 0

# check whether checkpoints exist to resume training; create the directory if not
if os.path.exists(checkpoints_directory_unet) and len(
        os.listdir(checkpoints_directory_unet)):
    checkpoints = os.listdir(checkpoints_directory_unet)
    # sort by the iteration number embedded in the checkpoint filename
    checkpoints.sort(key=lambda x: int((x.split('_')[2]).split('.')[0]))
    model = torch.load(checkpoints_directory_unet + '/' + checkpoints[-1])
    iteri = int(re.findall(r'\d+', checkpoints[-1])[0])
    iter_new = iteri
    print("Resuming from iteration " + str(iteri))
elif not os.path.exists(checkpoints_directory_unet):
    os.makedirs(checkpoints_directory_unet)
Example 22
if __name__ == "__main__":

    raw = tf.placeholder(tf.float32, shape=(132, ) * 3)
    pred = tf.placeholder(tf.float32, shape=(132, ) * 3)

    raw_batched = tf.reshape(raw, (
        1,
        1,
    ) + (132, ) * 3)
    pred_reshaped = tf.reshape(pred, (
        1,
        1,
    ) + (132, ) * 3)

    unet = networks.unet(raw_batched, 24, 3, [[2, 2, 2], [2, 2, 2], [2, 2, 2]])

    affs_batched = networks.conv_pass(unet,
                                      kernel_size=1,
                                      num_fmaps=3,
                                      num_repetitions=1,
                                      activation='sigmoid')

    output_shape_batched = affs_batched.get_shape().as_list()
    output_shape = output_shape_batched[1:]  # strip the batch dimension

    affs = tf.reshape(affs_batched, output_shape)

    gt_affs = tf.placeholder(tf.float32, shape=output_shape)

    loss_weights = tf.placeholder(tf.float32, shape=output_shape)
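
Example 22 stops after defining the placeholders. A minimal sketch of how the graph might be finished and driven for one step, assuming an MSE loss and Adam optimizer in the style of the other snippets on this page (the random feed values are purely illustrative):

    # Illustrative continuation (not part of the original snippet).
    import numpy as np

    loss = tf.losses.mean_squared_error(gt_affs, affs, loss_weights)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.5e-4).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {
            raw: np.random.rand(132, 132, 132).astype(np.float32),
            gt_affs: np.random.rand(*output_shape).astype(np.float32),
            loss_weights: np.ones(output_shape, dtype=np.float32),
        }
        step_loss, _ = sess.run([loss, optimizer], feed_dict=feed)
        print('loss:', step_loss)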
Example 23
def test(model_name,
         iter_num,
         gpu_id,
         vol_size=(160, 224, 192),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 8, 8, 3]):
    """
    test
    nf_enc and nf_dec
    #nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """

    gpu = '/gpu:' + str(gpu_id)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('../models/' + model_name + '/' + str(iter_num) +
                         '.h5')

    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

    sum_dice = 0

    for i in range(0, noftest):
        X_vol = np.load(subjcet_filepaths[i])
        X_vol = np.reshape(X_vol, (1, ) + X_vol.shape + (1, ))
        X_seg = np.load(seg_filepaths[i])
        #X_seg = np.reshape(X_seg, (1,) + X_seg.shape + (1,))

        for j in range(0, noftest):
            Y_vol = np.load(subjcet_filepaths[j])
            Y_vol = np.reshape(Y_vol, (1, ) + Y_vol.shape + (1, ))
            Y_seg = np.load(seg_filepaths[j])
            #Y_seg = np.reshape(Y_seg, (1,) + Y_seg.shape + (1,))

            with tf.device(gpu):
                pred = net.predict([X_vol, Y_vol])

            # Warp segments with flow
            flow = pred[1][0, :, :, :, :]
            sample = flow + grid
            sample = np.stack(
                (sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]),
                3)
            warp_seg = interpn((yy, xx, zz),
                               X_seg,
                               sample,
                               method='nearest',
                               bounds_error=False,
                               fill_value=0)

            #print 'X Image size:', X_seg.shape
            #print 'warped Image size:', warp_seg.shape
            #print 'Y Image size:', Y_seg.shape

            vals, lnames = dice(warp_seg, Y_seg, nargout=2)
            print('%dth image to %dth image --- ' % (i + startoftest, j + startoftest),
                  'mean: ', np.mean(vals), 'std: ', np.std(vals))
            sum_dice = sum_dice + np.mean(vals)

            mat_Deform = pred[1][0, :, :, :, :]

            img_Deform = sitk.GetImageFromArray(mat_Deform)
            print('img_Deform shape:', mat_Deform.shape)
            outputfilename = output_dir + 'deformations/' + 'Deform%02dto%02d.mha' % (
                i + startoftest, j + startoftest)

            sitk.WriteImage(img_Deform, outputfilename)

            mat_warpimage = pred[0][0, :, :, :, 0]
            img_Warp = sitk.GetImageFromArray(mat_warpimage)
            caster = sitk.CastImageFilter()
            caster.SetOutputPixelType(sitk.sitkInt16)
            img_Warp = caster.Execute(img_Warp)
            outputfilename = output_dir + 'warp_images/' + 'na%02dto%02d.mha' % (
                i + startoftest, j + startoftest)
            sitk.WriteImage(img_Warp, outputfilename)

            img_Warpseg = sitk.GetImageFromArray(warp_seg)
            img_Warpseg = caster.Execute(img_Warpseg)
            outputfilename = output_dir + 'warp_images/' + 'seg%02dto%02d.mha' % (
                i + startoftest, j + startoftest)
            sitk.WriteImage(img_Warpseg, outputfilename)

    print('Average Dice ratio:', sum_dice / (noftest ** 2))
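
The `dice` helper used by these examples is imported from elsewhere; below is a minimal sketch of a per-label Dice implementation consistent with the call sites (`vals, labels = dice(a, b, labels=..., nargout=2)`), offered as an assumption rather than the actual library code:

import numpy as np

def dice(vol1, vol2, labels=None, nargout=1):
    # Per-label Dice overlap: 2*|A ∩ B| / (|A| + |B|) for each label value.
    if labels is None:
        labels = np.unique(np.concatenate((vol1.ravel(), vol2.ravel())))
        labels = np.delete(labels, np.where(labels == 0))  # skip background

    dicem = np.zeros(len(labels))
    for idx, lab in enumerate(labels):
        top = 2 * np.sum(np.logical_and(vol1 == lab, vol2 == lab))
        bottom = np.sum(vol1 == lab) + np.sum(vol2 == lab)
        dicem[idx] = top / np.maximum(bottom, np.finfo(float).eps)

    if nargout == 1:
        return dicem
    return dicem, labels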
Example 24
    with tf.variable_scope('autocontext') as scope:

        # phase 1

        raw_0 = tf.placeholder(tf.float32, shape=shape_0)
        raw_0_batched = tf.reshape(raw_0, (1, 1) + shape_0)

        input_0 = tf.concat([raw_0_batched, affs_0_batched], 1)
        if ignore:
            keep_raw = tf.ones_like(raw_0_batched)
            ignore_aff = tf.zeros_like(affs_0_batched)
            ignore_mask = tf.concat([keep_raw, ignore_aff], 1)
            input_0 = networks.ignore(input_0, ignore_mask)

        unet = networks.unet(input_0, 24, 3, [[2, 2, 2], [2, 2, 2], [2, 2, 2]])

        affs_1_batched = networks.conv_pass(unet,
                                            kernel_size=1,
                                            num_fmaps=3,
                                            num_repetitions=1,
                                            activation='sigmoid')

        affs_1 = tf.reshape(affs_1_batched, (3, ) + shape_1)
        gt_affs_1 = tf.placeholder(tf.float32, shape=(3, ) + shape_1)
        loss_weights_1 = tf.placeholder(tf.float32, shape=(3, ) + shape_1)

        loss_1 = tf.losses.mean_squared_error(gt_affs_1, affs_1,
                                              loss_weights_1)

        # phase 2
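
The snippet is truncated before phase 2. In an autocontext setup, the second phase typically re-runs the network with the first phase's predictions concatenated as extra input channels; the sketch below is a guess at that wiring, not the source's actual phase 2 (all names beyond those defined above are assumptions, and whether this repo shares weights between phases is unknown):

        # Hypothetical phase-2 wiring (not from the source): concatenate a
        # raw volume cropped to shape_1 with the phase-1 affinities and run
        # a second U-Net pass on them.
        raw_1 = tf.placeholder(tf.float32, shape=shape_1)
        raw_1_batched = tf.reshape(raw_1, (1, 1) + shape_1)

        input_1 = tf.concat([raw_1_batched, affs_1_batched], 1)
        unet_1 = networks.unet(input_1, 24, 3,
                               [[2, 2, 2], [2, 2, 2], [2, 2, 2]])
        affs_2_batched = networks.conv_pass(unet_1,
                                            kernel_size=1,
                                            num_fmaps=3,
                                            num_repetitions=1,
                                            activation='sigmoid')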
Example 25
	# Anatomical labels we want to evaluate
	labels = sio.loadmat('../data/labels.mat')['labels'][0]

	atlas = np.load('../data/atlas_norm.npz')
	atlas_vol = atlas['vol']
	atlas_seg = atlas['seg']
	atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True
	config.allow_soft_placement = True
	set_session(tf.Session(config=config))

	# load weights of model
	with tf.device(gpu):
		net = networks.unet(vol_size, nf_enc, nf_dec)
		# net.load_weights('../models/' + model_name + '/' + str(iter_num) + '.h5')
		net.load_weights(model_name)

	xx = np.arange(vol_size[1])
	yy = np.arange(vol_size[0])
	zz = np.arange(vol_size[2])
	grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

	X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

	with tf.device(gpu):
		pred = net.predict([X_vol, atlas_vol])

	# Warp segments with flow
	flow = pred[1][0, :, :, :, :]
Example 26
def test(model_name,
         gpu_id,
         iter_num,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 8, 8, 3]):
    """
	test

	nf_enc and nf_dec
	#nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """

    gpu = '/gpu:' + str(gpu_id)

    # Test file and anatomical labels we want to evaluate
    with open('../data/test_examples.txt') as test_brain_file:
        test_brain_strings = test_brain_file.readlines()
    test_brain_strings = [x.strip() for x in test_brain_strings]
    good_labels = sio.loadmat('../data/test_labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1, ) + atlas_vol.shape + (1, ))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('../models/' + model_name + '/' + str(iter_num) +
                         '.h5')

    n_batches = len(test_brain_strings)
    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

    dice_vals = np.zeros((len(good_labels), n_batches))

    np.random.seed(17)

    for k in range(0, n_batches):
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        with tf.device(gpu):
            pred = net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        flow = pred[1][0, :, :, :, :]
        sample = flow + grid
        sample = np.stack(
            (sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
        warp_seg = interpn((yy, xx, zz),
                           X_seg[0, :, :, :, 0],
                           sample,
                           method='nearest',
                           bounds_error=False,
                           fill_value=0)

        vals, labels = dice(warp_seg, atlas_seg, labels=good_labels, nargout=2)
        dice_vals[:, k] = vals
        print(np.mean(dice_vals[:, k]))
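
The loop prints a running per-subject mean; a short aggregate report could follow it (an illustrative addition, not in the source):

    # Illustrative addition: summarize Dice across all test subjects.
    print('mean Dice over all subjects:', np.mean(dice_vals))
    print('std of per-subject means:', np.std(np.mean(dice_vals, axis=0)))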
Example 27
import networks
import tensorflow as tf
import json

if __name__ == "__main__":

    raw = tf.placeholder(tf.float32, shape=(84, 268, 268))
    raw_batched = tf.reshape(raw, (1, 1,) + (84, 268, 268))

    unet = networks.unet(raw_batched, 12, 6, [[1, 3, 3], [1, 3, 3], [1, 3, 3]])

    # raw = tf.placeholder(tf.float32, shape=(132,)*3)
    # raw_batched = tf.reshape(raw, (1, 1,) + (132,)*3)
    #
    # unet = networks.unet(raw_batched, 24, 3, [[2, 2, 2], [2, 2, 2], [2, 2, 2]])

    dist_batched = networks.conv_pass(
        unet,
        kernel_size=1,
        num_fmaps=2,
        num_repetitions=1,
        activation=None)

    syn_dist, bdy_dist = tf.unstack(dist_batched, 2, axis=1)
    #bdy_dist_batched = networks.conv_pass(unet,
    #                              kernel_size=1,
    #                              num_fmaps=1,
    #                              num_repetitions=1,
    #                              activation=None)

    output_shape = syn_dist.get_shape().as_list()