Example #1
def test(gpu, atlas_file, model_file):
    """
    model training function
    :param gpu: integer specifying the gpu to use
    :param atlas_file: atlas filename. So far we support npz file with a 'vol' variable
    :param model_file: the model directory to load from
    """

    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    device = "cuda"

    # Load the atlas (dims: 160x192x224).
    atlas = np.load(atlas_file)
    atlas_vol = atlas['vol']
    atlas_vol = atlas_vol[np.newaxis, ..., np.newaxis]
    atlas_seg = atlas['seg']

    # Test file and anatomical labels we want to evaluate
    test_file = open('../data/test_HCPChild.txt')
    test_strings = test_file.readlines()
    test_strings = [x.strip() for x in test_strings]
    good_labels = sio.loadmat('../data/labels.mat')['labels'][0]

    # Set up model
    criterion = LossFunction_mpr().cuda()
    model = MPR_net_HO(criterion)
    model.to(device)
    model.load_state_dict(
        torch.load(model_file, map_location=lambda storage, loc: storage))

    # set up atlas tensor
    input_fixed = torch.from_numpy(atlas_vol).to(device).float()
    input_fixed = input_fixed.permute(0, 4, 1, 2, 3)

    # Use this to warp segments
    trf = SpatialTransformer(atlas_vol.shape[1:-1], mode='nearest')
    trf.to(device)

    for k in range(0, len(test_strings)):

        vol_name, seg_name = test_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        input_moving = torch.from_numpy(X_vol).to(device).float()
        input_moving = input_moving.permute(0, 4, 1, 2, 3)

        warp, flow, flow1, refine_flow1, flow2, refine_flow2 = model(
            input_moving, input_fixed)

        # Warp segment using flow
        moving_seg = torch.from_numpy(X_seg).to(device).float()
        moving_seg = moving_seg.permute(0, 4, 1, 2, 3)
        warp_seg = trf(moving_seg, flow).detach().cpu().numpy()

        vals, labels = dice(warp_seg, atlas_seg, labels=good_labels, nargout=2)
        print(np.mean(vals))
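For reference, dice here comes from medipy.metrics (imported in Example #10 below). A minimal sketch of the per-label Dice overlap it is asked to compute, assuming the nargout=2 convention returns (values, labels):

import numpy as np

def dice_sketch(seg_a, seg_b, labels):
    """Illustrative per-label Dice overlap between two integer label volumes."""
    vals = np.zeros(len(labels))
    for idx, lab in enumerate(labels):
        a = (seg_a == lab)
        b = (seg_b == lab)
        denom = a.sum() + b.sum()
        vals[idx] = 2.0 * np.logical_and(a, b).sum() / denom if denom > 0 else 0.0
    return vals, labels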
Example #2
def test(model_name, iter_num, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3]):
	"""
	test

	nf_enc and nf_dec
	#nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """  

	gpu = '/gpu:' + str(gpu_id)

	# Anatomical labels we want to evaluate
	labels = sio.loadmat('../data/labels.mat')['labels'][0]

	atlas = np.load('../data/atlas_norm.npz')
	atlas_vol = atlas['vol']
	atlas_seg = atlas['seg']
	atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True
	config.allow_soft_placement = True
	set_session(tf.Session(config=config))

	# load weights of model
	with tf.device(gpu):
		net = networks.unet(vol_size, nf_enc, nf_dec)
		net.load_weights('../models/' + model_name +
                         '/' + str(iter_num) + '.h5')

	xx = np.arange(vol_size[1])  # 192
	yy = np.arange(vol_size[0])  # 160
	zz = np.arange(vol_size[2])  # 224
	grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)  # (160,192,224,3): the original coordinate of every voxel

	X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

	with tf.device(gpu):
		pred = net.predict([X_vol, atlas_vol])

	# Warp segments with flow
	flow = pred[1][0, :, :, :, :]  # (160,192,224,3)
	sample = flow + grid  # add the predicted displacement to the original coordinates
	sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
	warp_seg = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], sample, method='nearest', bounds_error=False, fill_value=0)

	vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
	print(np.mean(vals), np.std(vals))
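The warp step above recurs throughout these examples: np.meshgrid defaults to xy indexing, which swaps the first two axes of the grid relative to the volume, and the np.stack line swaps the sampled coordinates back into (y, x, z) order before interpn. A self-contained sketch of the same pattern (the function name is illustrative, not from the original repo):

import numpy as np
from scipy.interpolate import interpn

def warp_volume_nearest(vol, flow):
    """Warp a 3D volume by a dense displacement field of shape vol.shape + (3,)."""
    yy, xx, zz = [np.arange(s) for s in vol.shape]
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)  # last axis: (x, y, z)
    sample = flow + grid
    # reorder sampled coordinates to (dim0, dim1, dim2) for interpn
    sample = np.stack((sample[..., 1], sample[..., 0], sample[..., 2]), 3)
    return interpn((yy, xx, zz), vol, sample, method='nearest',
                   bounds_error=False, fill_value=0)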
Example #3
def test(model_name, iter_num, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3]):
	"""
	test

	nf_enc and nf_dec
	#nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """  

	gpu = '/gpu:' + str(gpu_id)

	# Anatomical labels we want to evaluate
	labels = sio.loadmat('../data/labels.mat')['labels'][0]

	atlas = np.load('../data/atlas_norm.npz')
	atlas_vol = atlas['vol']
	atlas_seg = atlas['seg']
	atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True
	config.allow_soft_placement = True
	set_session(tf.Session(config=config))

	# load weights of model
	with tf.device(gpu):
		net = networks.unet(vol_size, nf_enc, nf_dec)
		net.load_weights('../models/' + model_name +
                         '/' + str(iter_num) + '.h5')

	xx = np.arange(vol_size[1])
	yy = np.arange(vol_size[0])
	zz = np.arange(vol_size[2])
	grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

	X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

	with tf.device(gpu):
		pred = net.predict([X_vol, atlas_vol])

	# Warp segments with flow
	flow = pred[1][0, :, :, :, :]
	sample = flow+grid
	sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
	warp_seg = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], sample, method='nearest', bounds_error=False, fill_value=0)

	vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
	print(np.mean(vals), np.std(vals))
Example #4
def test(gpu, init_model_file, atlas_file):
    """Register each test volume to the atlas and save the warped result and flow field as NIfTI files."""

    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    # Test file and anatomical labels we want to evaluate
    atlas_vol = load_volfile(atlas_file)
    atlas_vol = atlas_vol[np.newaxis, ..., np.newaxis]

    test_file = open('data/test_example.txt')
    test_strings = test_file.readlines()
    test_strings = [x.strip() for x in test_strings]

    # Set up model
    criterion = LossFunction_mpr_MIND().cuda()
    model = MPR_net_Tr(criterion)
    model = model.cuda()
    model.eval()
    print_network(model)
    model.load_state_dict(torch.load(init_model_file, map_location='cuda:0'))

    # set up atlas tensor
    input_fixed = torch.from_numpy(atlas_vol).cuda().float()
    input_fixed = input_fixed.permute(0, 4, 1, 2, 3)
    # Use this to warp segments
    trf = SpatialTransformer(atlas_vol.shape[1:-1], mode='nearest')
    trf = trf.cuda()

    for k in range(0, len(test_strings)):

        vol_name, seg_name = test_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        input_moving = torch.from_numpy(X_vol).cuda().float()
        input_moving = input_moving.permute(0, 4, 1, 2, 3)
        with torch.no_grad():
            warp, flow, flow1, refine_flow1, flow2, refine_flow2 = model(
                input_moving, input_fixed)

        warp = warp.detach().cpu().numpy()
        warp = nib.Nifti1Image(warp[0, 0, :, :, :], np.eye(4))
        nib.save(warp, 'data/res-warped.nii.gz')

        flow = flow.permute(0, 2, 3, 4, 1)
        flow = flow.detach().cpu().numpy()
        flow = nib.Nifti1Image(flow[0, :, :, :, :], np.eye(4))
        nib.save(flow, 'data/res-flow.nii.gz')
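Example #4 writes the warped volume and the flow field to NIfTI files; a short sketch of reading them back with nibabel (paths are those written above; shapes assume the 160x192x224 atlas):

import nibabel as nib

warped = nib.load('data/res-warped.nii.gz').get_fdata()  # (160, 192, 224)
flow = nib.load('data/res-flow.nii.gz').get_fdata()      # (160, 192, 224, 3)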
Example #5
	config.gpu_options.allow_growth = True
	config.allow_soft_placement = True
	set_session(tf.Session(config=config))

	# load weights of model
	with tf.device(gpu):
		net = networks.unet(vol_size, nf_enc, nf_dec)
		# net.load_weights('../models/' + model_name + '/' + str(iter_num) + '.h5')
		net.load_weights(model_name)

	xx = np.arange(vol_size[1])
	yy = np.arange(vol_size[0])
	zz = np.arange(vol_size[2])
	grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

	X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

	with tf.device(gpu):
		pred = net.predict([X_vol, atlas_vol])

	# Warp segments with flow
	flow = pred[1][0, :, :, :, :]
	sample = flow+grid
	sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
	warp_seg = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], sample, method='nearest', bounds_error=False, fill_value=0)

	vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
	print(np.mean(vals), np.std(vals))


if __name__ == "__main__":
Example #6
model.load_weights(m_dir)

# get label
labels = sio.loadmat('../data/labels.mat')['labels'][0]

# read validation data
valid_file = open('../data/validate_data.txt')
valid_strings = valid_file.readlines()
lenn = 5  # number of validation volumes to load
vol_list = list()  # list of volume data
seg_list = list()  # list of segmentation data
for i in range(0, lenn):
    st = valid_strings[i].strip()
    vol_dir = '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/' + st
    seg_dir = '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/' + st
    X_vol, X_seg = datagenerators.load_example_by_name(vol_dir, seg_dir)
    vol_list.append(X_vol)
    seg_list.append(X_seg)

# output segmentation volume
f_seg = np.zeros((1, 64, 64, 64, 30))

# test the data on the model
# the size of test data is 5
cnt = 1
for i in range(0, 5):
    # rand_num = random.randint(0, 18)
    ii = i
    X_vol = vol_list[ii]
    X_seg = seg_list[ii]
Example #7
def normalize(model_name, iter_num, gpu_id, n_test, vol_size=(160, 192, 224)):
    start_time = time.time()
    gpu = '/gpu:' + str(gpu_id)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        seg_path = '../models/' + model_name + '/' + str(iter_num) + '.h5'
        feature_model, num_features = networks.segmenter_feature_model(
            seg_path)

    feature_stats = [{} for i in range(len(num_features))]
    percentiles = [0.1, 1, 5, 10, 90, 95, 99, 99.9]

    for step in range(0, n_test):
        vol_name, seg_name = test_brain_strings[step].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)
        print('X_vol shape', X_vol.shape)

        with tf.device(gpu):
            print('input', feature_model.inputs)
            print('actual',
                  tf.transpose(X_vol[0, :, :, :, :], perm=[2, 0, 1, 3]).shape)
            enc_array = feature_model.predict(
                [np.transpose(X_vol[0, :, :, :, :], (2, 0, 1, 3))],
                batch_size=16)

        print(step, time.time() - start_time)
        for i in range(len(num_features)):
            enc = enc_array[i]
            # batch size is 1
            mean = np.mean(enc, axis=(0, 1, 2))
            mean_keepdim = np.mean(enc, axis=(0, 1, 2), keepdims=True)
            var = np.mean(np.square(enc - mean_keepdim), axis=(0, 1, 2))
            means = feature_stats[i].get('mean', [])
            variances = feature_stats[i].get('var', [])
            means.append(mean)
            variances.append(var)
            feature_stats[i]['mean'] = means
            feature_stats[i]['var'] = variances

            for q in percentiles:
                pc = np.percentile(enc, q, axis=(0, 1, 2))
                lst = feature_stats[i].get(q, [])
                lst.append(pc)
                feature_stats[i][q] = lst
        print(step, time.time() - start_time)

    for i in range(len(num_features)):
        for key, value in feature_stats[i].items():
            if key in ('mean', 'var'):
                feature_stats[i][key] = np.mean(np.array(value), axis=0)
            else:
                feature_stats[i][key] = np.median(np.array(value), axis=0)

    print(feature_stats)
    with open('seg_feature_stats.txt', 'wb') as file:
        file.write(pickle.dumps(
            feature_stats))  # use `pickle.loads` to do the reverse
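As the closing comment says, the pickled statistics can be restored with pickle; a minimal read-back sketch:

import pickle

with open('seg_feature_stats.txt', 'rb') as f:
    feature_stats = pickle.loads(f.read())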
Example #8
def test(gpu, atlas_file, model, init_model_file):
    """
    model training function
    :param gpu: integer specifying the gpu to use
    :param atlas_file: atlas filename. So far we support npz file with a 'vol' variable
    :param model: either vm1 or vm2 (based on CVPR 2018 paper)
    :param init_model_file: the model directory to load from
    """

    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    device = "cuda"

    # Load the atlas (dims: 160x192x224).
    atlas = np.load(atlas_file)
    atlas_vol = atlas['vol'][np.newaxis, ..., np.newaxis]
    atlas_seg = atlas['seg']
    vol_size = atlas_vol.shape[1:-1]

    # Test file and anatomical labels we want to evaluate
    test_file = open('../voxelmorph/data/val_examples.txt')
    test_strings = test_file.readlines()
    test_strings = [x.strip() for x in test_strings]
    good_labels = sio.loadmat(
        '../voxelmorph/data/test_labels.mat')['labels'][0]

    # Prepare the vm1 or vm2 model and send to device
    nf_enc = [16, 32, 32, 32]
    if model == "vm1":
        nf_dec = [32, 32, 32, 32, 8, 8]
    elif model == "vm2":
        nf_dec = [32, 32, 32, 32, 32, 16, 16]

    # Set up model
    model = cvpr2018_net(vol_size, nf_enc, nf_dec)
    model.to(device)
    model.load_state_dict(
        torch.load(init_model_file, map_location=lambda storage, loc: storage))

    # set up atlas tensor
    input_fixed = torch.from_numpy(atlas_vol).to(device).float()
    input_fixed = input_fixed.permute(0, 4, 1, 2, 3)

    # Use this to warp segments
    trf = SpatialTransformer(atlas_vol.shape[1:-1], mode='nearest')
    trf.to(device)

    for k in range(0, len(test_strings)):

        vol_name, seg_name = test_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        input_moving = torch.from_numpy(X_vol).to(device).float()
        input_moving = input_moving.permute(0, 4, 1, 2, 3)

        warp, flow = model(input_moving, input_fixed)

        # Warp segment using flow
        moving_seg = torch.from_numpy(X_seg).to(device).float()
        moving_seg = moving_seg.permute(0, 4, 1, 2, 3)
        warp_seg = trf(moving_seg, flow).detach().cpu().numpy()

        vals, labels = dice(warp_seg, atlas_seg, labels=good_labels, nargout=2)
        #dice_vals[:, k] = vals
        #print(np.mean(dice_vals[:, k]))
        print(np.mean(vals))
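A hypothetical invocation of this test function (the GPU index and the model path are placeholders, not from the original repo):

test(gpu='0',
     atlas_file='../data/atlas_norm.npz',
     model='vm2',
     init_model_file='../models/cvpr2018_vm2.ckpt')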
Example #9
def test(
        gpu_id,
        model_dir,
        iter_num,
        compute_type='GPU',  # GPU or CPU
        vol_size=(160, 192, 224),
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 16, 3],
        save_file=None):
    """
    test via segmetnation propagation
    works by iterating over some iamge files, registering them to atlas,
    propagating the warps, then computing Dice with atlas segmentations
    """

    # GPU handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        # if testing miccai run, should be xy indexing.
        net = networks.miccai2018_net(vol_size,
                                      nf_enc,
                                      nf_dec,
                                      use_miccai_int=True,
                                      indexing='xy')
        net.load_weights(os.path.join(model_dir, str(iter_num) + '.h5'))

        # compose diffeomorphic flow output model
        diff_net = keras.models.Model(net.inputs,
                                      net.get_layer('diffflow').output)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size)

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # prepare a matrix of dice values
    dice_vals = np.zeros((len(good_labels), n_batches))
    for k in range(n_batches):
        # get data
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        # predict transform
        with tf.device(gpu):
            pred = diff_net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        if compute_type == 'CPU':
            flow = pred[0, :, :, :, :]
            warp_seg = util.warp_seg(X_seg,
                                     flow,
                                     grid=grid,
                                     xx=xx,
                                     yy=yy,
                                     zz=zz)

        else:  # GPU
            # Rigid registration only by GPU
            flow = pred[0, :, :, :, :]
            # Compute the sampled source coordinates (A in the formula)
            x = np.linspace(0, 160 - 16, sample_num)
            x = x.astype(np.int32)
            y = np.linspace(0, 190 - 19, sample_num)
            y = y.astype(np.int32)
            z = np.linspace(0, 220 - 22, sample_num)
            z = z.astype(np.int32)
            index = np.rollaxis(np.array(np.meshgrid(x, y, z)), 0, 4)
            x = index[:, :, :, 0]
            y = index[:, :, :, 1]
            z = index[:, :, :, 2]

            # Y in formula
            x_flow = np.arange(vol_size[0])
            y_flow = np.arange(vol_size[1])
            z_flow = np.arange(vol_size[2])
            grid = np.rollaxis(np.array((np.meshgrid(y_flow, x_flow, z_flow))),
                               0, 4)  # original coordinate
            grid_x = grid_sample(x, y, z, grid[:, :, :, 0], sample_num)
            grid_y = grid_sample(x, y, z, grid[:, :, :, 1], sample_num)
            grid_z = grid_sample(x, y, z, grid[:, :, :, 2],
                                 sample_num)  # X (10,10,10)

            sample = flow + grid
            sample_x = grid_sample(x, y, z, sample[:, :, :, 0], sample_num)
            sample_y = grid_sample(x, y, z, sample[:, :, :, 1], sample_num)
            sample_z = grid_sample(x, y, z, sample[:, :, :, 2],
                                   sample_num)  # Y (10,10,10)

            sum_x = np.sum(flow[:, :, :, 0])
            sum_y = np.sum(flow[:, :, :, 1])
            sum_z = np.sum(flow[:, :, :, 2])

            ave_x = sum_x / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_y = sum_y / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_z = sum_z / (vol_size[0] * vol_size[1] * vol_size[2])

            # formula
            Y = np.zeros((10, 10, 10, grid_dimension))
            X = np.zeros((10, 10, 10, grid_dimension))
            T = np.array([ave_x, ave_y, ave_z, 1])  # (4,1)
            # R = np.zeros((10, 10, 10, grid_dimension, grid_dimension))

            for i in np.arange(10):
                for j in np.arange(10):
                    for z in np.arange(10):
                        Y[i, j, z, :] = np.array([
                            sample_x[i, j, z], sample_y[i, j, z],
                            sample_z[i, j, z], 1
                        ])

            for i in np.arange(10):
                for j in np.arange(10):
                    for z in np.arange(10):
                        X[i, j, z, :] = np.array([
                            grid_x[i, j, z], grid_y[i, j, z], grid_z[i, j, z],
                            1
                        ])

            X = X.reshape((1000, grid_dimension))
            Y = Y.reshape((1000, grid_dimension))
            R = np.dot(
                np.dot(np.linalg.pinv(np.dot(np.transpose(X), X)),
                       np.transpose(X)), Y)  # R

            # build new grid(Use R to do the spatial transform)
            shifted_x = np.arange(vol_size[0])
            shifted_y = np.arange(vol_size[1])
            shifted_z = np.arange(vol_size[2])
            print(shifted_x.shape)
            print(shifted_y.shape)
            print(shifted_z.shape)
            shifted_grid = np.rollaxis(
                np.array((np.meshgrid(shifted_y, shifted_x, shifted_z))), 0, 4)
            print(shifted_grid.shape)
            for i in np.arange(vol_size[0]):
                for j in np.arange(vol_size[1]):
                    for z in np.arange(vol_size[2]):
                        coordinates = np.dot(
                            R,
                            np.array([i, j, z, 1]).reshape(4, 1)) + T.reshape(
                                4, 1)
                        print("voxel." + '(' + str(i) + ',' + str(j) + ',' +
                              str(z) + ')')
                        shifted_grid[i, j, z, 0] = coordinates[0]
                        shifted_grid[i, j, z, 1] = coordinates[1]
                        shifted_grid[i, j, z, 2] = coordinates[2]

            # interpolation
            xx = np.arange(vol_size[1])
            yy = np.arange(vol_size[0])
            zz = np.arange(vol_size[2])
            warp_seg = interpn((yy, xx, zz),
                               X_seg[0, :, :, :, 0],
                               shifted_grid,
                               method='nearest',
                               bounds_error=False,
                               fill_value=0)

            # CVPR
            grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)
            sample = flow + grid
            sample = np.stack(
                (sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]),
                3)
            warp_seg2 = interpn((yy, xx, zz),
                                X_seg[0, :, :, :, 0],
                                sample,
                                method='nearest',
                                bounds_error=False,
                                fill_value=0)

            # compute dice
            vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
            vals2, _ = dice(X_seg[0, :, :, :, 0],
                            atlas_seg,
                            labels=labels,
                            nargout=2)
            vals3, _ = dice(warp_seg2, atlas_seg, labels=labels, nargout=2)
            print("dice before:")
            print(np.mean(vals2), np.std(vals2))
            print("dice after deformable registration:")
            print(np.mean(vals3), np.std(vals3))
            print("dice after rigid registration:")
            print(np.mean(vals), np.std(vals))
            warp_seg = nn_trf_model.predict([X_seg, pred])[0, ..., 0]

        # compute Volume Overlap (Dice)
        dice_vals[:, k] = dice(warp_seg, atlas_seg, labels=good_labels)
        print('%3d %5.3f %5.3f' % (k, np.mean(
            dice_vals[:, k]), np.mean(np.mean(dice_vals[:, :k + 1]))))

        if save_file is not None:
            sio.savemat(save_file, {
                'dice_vals': dice_vals,
                'labels': good_labels
            })
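grid_sample is never defined in this listing. Judging from its call sites (integer index arrays x, y, z of shape (sample_num, sample_num, sample_num) and a full-resolution coordinate volume), a plausible stand-in is plain fancy indexing; this is an assumption, not the original helper:

import numpy as np

def grid_sample(x, y, z, values, sample_num):
    """Assumed behaviour: pick values at the sampled integer coordinates,
    returning a (sample_num, sample_num, sample_num) array."""
    return values[x, y, z]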
Example #10
from scipy.interpolate import interpn
import matplotlib.pyplot as plt

# project
sys.path.append('../ext/medipy-lib')
sys.path.append('../ext/neuron')
sys.path.append('../ext/pynd-lib')
sys.path.append('../ext/pytools-lib')

import medipy
import networks
from medipy.metrics import dice
import datagenerators
import neuron as nu

X_vol, X_seg = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990114_vc722.npz',
                                                   '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990114_vc722.npz') # (160, 192, 224)
print('volume shape')
print(X_vol.shape)
print('seg shape')
print(X_seg.shape)
#X_seg_slice = X_seg[0, :, :, :, 0 ]
#print(X_seg_slice)
#X_seg_slice.reshape([X_seg_slice.shape[0],X_seg_slice.shape[2]])
#print(X_seg_slice.shape)
#X_seg_slice = X_seg_slice.reshape((X_seg_slice.shape[1],X_seg_slice.shape[0],X_seg_slice.shape[2]))
#X_seg_slice = X_seg_slice.reshape((X_seg_slice.shape[2],X_seg_slice.shape[1],X_seg_slice.shape[0]))
#for i in range(0,X_seg_slice.shape[0]):
#    list.insert(X_seg_slice[i,:,:])
#X_seg_slice = X_seg_slice.reshape([X_seg_slice.shape[0],X_seg_slice.shape[1],X_seg_slice.shape[2]])
#X_seg_slice = [X_seg_slice]
#fig,axs = nu.plot.slices(X_seg_slice)
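The commented-out lines above were attempts to plot segmentation slices with the neuron package; a minimal matplotlib alternative for inspecting one axial slice (the slice index is arbitrary):

plt.imshow(X_seg[0, :, :, 112, 0])  # X_seg is (1, 160, 192, 224, 1)
plt.title('segmentation, z = 112')
plt.colorbar()
plt.savefig('seg_slice.png')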
Example #11
def normalize(model_name,
              iter_num,
              gpu_id,
              n_test,
              vol_size=(160, 192, 224),
              nf_enc=[16, 32, 32],
              nf_dec=[32, 32, 32]):
    """
    test

    nf_enc and nf_dec
    #nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """
    start_time = time.time()
    gpu = '/gpu:' + str(gpu_id)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1, ) + atlas_vol.shape + (1, ))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        full_model, train_model = networks.autoencoder(vol_size, nf_enc,
                                                       nf_dec)
        full_model.load_weights('../models/' + model_name + '/' +
                                str(iter_num) + '.h5')

    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

    feature_stats = [{} for i in range(len(nf_enc))]
    percentiles = [0.1, 1, 5, 10, 90, 95, 99, 99.9]

    for step in range(0, n_test):
        vol_name, seg_name = test_brain_strings[step].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        with tf.device(gpu):
            enc_array = full_model.predict([X_vol])
            output = enc_array[0]

        for i in range(len(nf_enc)):
            enc = enc_array[i + 1]
            # batch size is 1
            mean = np.mean(enc, axis=(0, 1, 2, 3))
            mean_keepdim = np.mean(enc, axis=(0, 1, 2, 3), keepdims=True)
            var = np.mean(np.square(enc - mean_keepdim), axis=(0, 1, 2, 3))
            means = feature_stats[i].get('mean', [])
            variances = feature_stats[i].get('var', [])
            means.append(mean)
            variances.append(var)
            feature_stats[i]['mean'] = means
            feature_stats[i]['var'] = variances

            for q in percentiles:
                pc = np.percentile(enc, q, axis=(0, 1, 2, 3))
                lst = feature_stats[i].get(q, [])
                lst.append(pc)
                feature_stats[i][q] = lst
        print(step)

    for i in range(len(nf_enc)):
        for key, value in feature_stats[i].items():
            if key in ('mean', 'var'):
                feature_stats[i][key] = np.mean(np.array(value), axis=0)
            else:
                feature_stats[i][key] = np.median(np.array(value), axis=0)

    print(feature_stats)
    with open('feature_stats.txt', 'wb') as file:
        file.write(pickle.dumps(
            feature_stats))  # use `pickle.loads` to do the reverse
Example #12
def test(gpu, size, data_dir, atlas_dir, model, init_model_file, saveDir,
         nr_val_data):
    """
    model testing function
    :param gpu: integer specifying the gpu to use
    :param size: integer related to desired size of the input images
    :param data_dir: String describing the location of the data
    :param atlas_dir: String where atlases are located
    :param model: either vm1 or vm2 (based on CVPR 2018 paper)
    :param init_model_file: the model directory to load from
    :param saveDir: String specifiying the direction to store the outputs
    :param nr_val_data: the number of validation samples must corresponed to the valiable for the train function
    """

    #set gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    device = "cuda"

    #set size
    vol_size = np.array([size, size, size])

    # Prepare the vm1 or vm2 model and send to device
    nf_enc = [16, 32, 32, 32]
    if model == "vm1":
        nf_dec = [32, 32, 32, 32, 8, 8]
    elif model == "vm2":
        nf_dec = [32, 32, 32, 32, 32, 16, 16]

    #sim_loss_fn = losses.ncc_loss if data_loss == "ncc" else losses.mse_loss
    sim_loss_fn = losses.mse_loss

    # Set up model
    model = cvpr2018_net(vol_size, nf_enc, nf_dec)
    model.to(device)
    model.load_state_dict(
        torch.load(init_model_file, map_location=lambda storage, loc: storage))

    # Use this to warp segments
    trf = SpatialTransformer(vol_size, mode='nearest')
    trf.to(device)
    test_strings = glob.glob(os.path.join(data_dir, '*.nii'))
    test_strings = test_strings[:nr_val_data]
    test_strings = [i for i in test_strings if "L1-L3" in i]
    #test_strings = ['.\\Data\\Train\\64\\3YQ_unhealthyL2_L1-L3.nii','.\\Data\\Train\\64\\x1b_unhealthyL2_L1-L3.nii' ]
    mean_val_loss = 0
    #iteration over the test volumes
    for k in range(0, len(test_strings)):

        #load the data create fixed and moving image
        X_vol, atlas_vol = datagenerators.load_example_by_name(
            test_strings[k], atlas_dir, size)

        input_fixed = torch.from_numpy(atlas_vol).to(device).float()
        input_fixed = input_fixed.permute(0, 4, 1, 2, 3)

        input_moving = torch.from_numpy(X_vol).to(device).float()
        input_moving = input_moving.permute(0, 4, 1, 2, 3)

        #produce the warp field
        warp, flow = model(input_moving, input_fixed)

        flow_vectors = flow[0]
        shape = flow_vectors.shape

        #plot the middle slice of the vector field
        #flow_vectors  = flow_vectors.permute(1, 2, 3, 0)
        #flow_vectors = flow_vectors.detach().cpu().numpy()
        #flow_vectors_middle = flow_vectors[:,:,int(shape[2]/2),0:2]
        #print('shape')
        #print(flow_vectors_middle.shape)
        #flow_vectors_middle = flow_vectors_middle.squeeze()
        #fig, axes = neuron.plot.flow([flow_vectors_middle], width=5,show = False)
        #print(type(fig))
        #fig.savefig(os.path.join(get_outputs_path(), test_strings[k][len(data_dir):-4] +'.png'))

        # generate the new sample with the vector field
        warped = trf(input_moving, flow).detach().cpu().numpy()

        mean_val_loss += sim_loss_fn(warp, input_fixed).item()  # accumulate as a plain float, not a graph-attached tensor

        warped = np.squeeze(warped)
        #print(warped.shape)
        #plot_middle_slices(warped)

        # store the generated volume
        img = nib.Nifti1Image(warped, np.eye(4))
        nib.save(img, os.path.join(saveDir, test_strings[k][len(data_dir):]))
    mean_val_loss /= len(test_strings)
    print("Mean validation loss: ")
    print(mean_val_loss)
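losses.mse_loss is not shown in this listing; its assumed form is the standard voxel-wise mean squared error, sketched here for context:

import torch

def mse_loss(x, y):
    """Assumed form of losses.mse_loss: mean squared voxel-wise error."""
    return torch.mean((x - y) ** 2)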
Example #13
def test(
        gpu_id,
        model_dir,
        iter_num,
        compute_type='GPU',  # GPU or CPU
        vol_size=(160, 192, 224),
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 16, 3],
        save_file=None):
    """
    test via segmetnation propagation
    works by iterating over some iamge files, registering them to atlas,
    propagating the warps, then computing Dice with atlas segmentations
    """

    # GPU handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        # if testing miccai run, should be xy indexing.
        net = networks.miccai2018_net(vol_size,
                                      nf_enc,
                                      nf_dec,
                                      use_miccai_int=False,
                                      indexing='ij')
        net.load_weights(os.path.join(model_dir, str(iter_num) + '.h5'))

        # compose diffeomorphic flow output model
        diff_net = keras.models.Model(net.inputs,
                                      net.get_layer('diffflow').output)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size, indexing='ij')

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # prepare a matrix of dice values
    dice_vals = np.zeros((len(good_labels), n_batches))
    for k in range(n_batches):
        # get data
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        # predict transform
        with tf.device(gpu):
            pred = diff_net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        if compute_type == 'CPU':
            flow = pred[0, :, :, :, :]
            warp_seg = util.warp_seg(X_seg,
                                     flow,
                                     grid=grid,
                                     xx=xx,
                                     yy=yy,
                                     zz=zz)

        else:  # GPU
            warp_seg = nn_trf_model.predict([X_seg, pred])[0, ..., 0]

        # compute Volume Overlap (Dice)
        dice_vals[:, k] = dice(warp_seg, atlas_seg, labels=good_labels)
        print('%3d %5.3f %5.3f' % (k, np.mean(
            dice_vals[:, k]), np.mean(np.mean(dice_vals[:, :k + 1]))))

        if save_file is not None:
            sio.savemat(save_file, {
                'dice_vals': dice_vals,
                'labels': good_labels
            })
Example #14
def test(load_iters, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3], sample_num = 10, grid_dimension = 4):
    """
    Test of the rigid registration by calculating the dice score between the atlas's segmentation and warped image's segmentation
    :param iter_num: iteration number
    :param gpu_id: gpu id
    :param vol_size: volume's size
    :param nf_enc: number of encode
    :param nf_dec: number of decoder
    :param model_name: load model's name
    :param sample_num: sample grid's dimension, this can be changed to improve the performance
    :param grid_dimension: R(in the formula)'s dimension
    :return: None
    """
    gpu = '/gpu:' + str(gpu_id)

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('../rigid_model/' + load_iters + '.h5', by_name=True)

    X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')

    orig_vol = X_vol

    theta = 0
    beta = 4
    omega = 0
    X_seg = rotate_img(X_seg[0, :, :, :, 0], theta=theta, beta=beta, omega=omega)
    X_vol = rotate_img(X_vol[0, :, :, :, 0], theta=theta, beta=beta, omega=omega)
    X_seg = X_seg.reshape((1,) + X_seg.shape + (1,))
    X_vol = X_vol.reshape((1,) + X_vol.shape + (1,))

    with tf.device(gpu):
        pred = net.predict([X_vol, atlas_vol])

    # get flow
    flow = pred[1][0, :, :, :, :]

    # sample coordinate(sample_num * sample_num * sample_num)
    x = np.linspace(0, (vol_size[0]/sample_num)*(sample_num-1), sample_num)
    x = x.astype(np.int32)
    y = np.linspace(0, (vol_size[1]/sample_num)*(sample_num-1), sample_num)
    y = y.astype(np.int32)
    z = np.linspace(0, (vol_size[2]/sample_num)*(sample_num-1), sample_num)
    z = z.astype(np.int32)
    index = np.rollaxis(np.array(np.meshgrid(y, x, z)), 0, 4)
    x = index[:, :, :, 1]
    y = index[:, :, :, 0]
    z = index[:, :, :, 2]

    # Y in formula
    x_flow = np.arange(vol_size[0])
    y_flow = np.arange(vol_size[1])
    z_flow = np.arange(vol_size[2])
    grid = np.rollaxis(np.array((np.meshgrid(y_flow, x_flow, z_flow))), 0, 4)# original coordinate
    grid_x = grid_sample(x, y, z, grid[:, :, :, 1], sample_num)
    grid_y = grid_sample(x, y, z, grid[:, :, :, 0], sample_num)
    grid_z = grid_sample(x, y, z, grid[:, :, :, 2], sample_num)#X (10,10,10)

    sample = flow + grid
    sample_x = grid_sample(x, y, z, sample[:, :, :, 1], sample_num)
    sample_y = grid_sample(x, y, z, sample[:, :, :, 0], sample_num)
    sample_z = grid_sample(x, y, z, sample[:, :, :, 2], sample_num)#Y (10,10,10)

    sum_x = np.sum(flow[:, :, :, 1])
    sum_y = np.sum(flow[:, :, :, 0])
    sum_z = np.sum(flow[:, :, :, 2])

    ave_x = sum_x/(vol_size[0] * vol_size[1] * vol_size[2])
    ave_y = sum_y/(vol_size[0] * vol_size[1] * vol_size[2])
    ave_z = sum_z/(vol_size[0] * vol_size[1] * vol_size[2])

    # formula
    Y = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
    X = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
    T = np.array([ave_x, ave_y, ave_z, 1])#(4,1)
    #R = np.zeros((10, 10, 10, grid_dimension, grid_dimension))

    for i in np.arange(sample_num):
        for j in np.arange(sample_num):
            for z in np.arange(sample_num):
                Y[i, j, z, :] = np.array([sample_x[i,j,z], sample_y[i,j,z], sample_z[i,j,z], 1])
                Y[i, j, z, :] = Y[i, j, z, :] - T# amend: Y` = Y - T

    for i in np.arange(sample_num):
        for j in np.arange(sample_num):
            for z in np.arange(sample_num):
                X[i, j, z, :] = np.array([grid_x[i, j, z], grid_y[i, j, z], grid_z[i, j, z], 1])

    X = X.reshape((sample_num * sample_num * sample_num, grid_dimension))
    Y = Y.reshape((sample_num * sample_num * sample_num, grid_dimension))
    R = np.dot(np.dot(np.linalg.pinv(np.dot(np.transpose(X), X)), np.transpose(X)), Y)# R
    print(R)
    # build new grid(Use R to do the spatial transform)
    shifted_x = np.arange(vol_size[0])
    shifted_y = np.arange(vol_size[1])
    shifted_z = np.arange(vol_size[2])
    shifted_grid = np.rollaxis(np.array((np.meshgrid(shifted_y, shifted_x, shifted_z))), 0, 4)

    for i in np.arange(vol_size[0]):
        for j in np.arange(vol_size[1]):
            for z in np.arange(vol_size[2]):
                coordinates = np.dot(R, np.array([i, j, z, 1]).reshape(4,1)) +  T.reshape(4,1)
                #print("voxel." + '(' + str(i) + ',' + str(j) + ',' + str(z) + ')')
                shifted_grid[i, j, z, 1] = coordinates[0]
                shifted_grid[i, j, z, 0] = coordinates[1]
                shifted_grid[i, j, z, 2] = coordinates[2]

    # interpolation
    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    shifted_grid = np.stack((shifted_grid[:, :, :, 1], shifted_grid[:, :, :, 0], shifted_grid[:, :, :, 2]), 3)  # note: shifted_grid has x and y swapped, so this swaps them back
    warp_seg = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], shifted_grid, method='nearest', bounds_error=False, fill_value=0)# rigid registration
    warp_vol = interpn((yy, xx, zz), X_vol[0, :, :, :, 0], shifted_grid, method='nearest', bounds_error=False, fill_value=0)# rigid registration

    # CVPR
    #grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)
    #sample = flow + grid
    #sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
    #warp_seg2 = interpn((yy, xx, zz), X_seg[0, :, :, :, 0], sample, method='nearest', bounds_error=False, fill_value=0)# deformable registration

    # compute dice
    vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
    #vals2, _ = dice(X_seg[0, :, :, :, 0], atlas_seg, labels=labels, nargout=2)
    #vals3, _ = dice(warp_seg2, atlas_seg, labels=labels, nargout=2)
    #print("dice before:")
    #print(np.mean(vals2), np.std(vals2))
    #print("dice after deformable registration:")
    #print(np.mean(vals3), np.std(vals3))
    print("dice after rigid registration:")
    print(np.mean(vals), np.std(vals))

    # plot
    #fig1, axs1 = nplt.slices(warp_seg[100, :, :], do_colorbars=True)
    #fig1.savefig('warp_seg100.png')
    #fig2, axs2 = nplt.slices(warp_seg[130, :, :], do_colorbars=True)
    #fig2.savefig('warp_seg130.png')
    #fig3, axs3 = nplt.slices(atlas_seg[100, :, :], do_colorbars=True)
    #fig3.savefig('atlas_seg100.png')
    #fig4, axs4 = nplt.slices(atlas_seg[130, :, :], do_colorbars=True)
    #fig4.savefig('atlas_seg130.png')

    # specify slice
    num_slice = 90

    plt.figure()
    plt.subplot(1, 3, 1)
    plt.imshow(orig_vol[0, :, num_slice, :, 0])
    plt.subplot(1, 3, 2)
    plt.imshow(X_vol[0, :, num_slice, :, 0])
    plt.subplot(1, 3, 3)
    plt.imshow(warp_vol[:, num_slice, :])
    plt.savefig("slice" + str(num_slice) + '_' + str(k) + ".png")
Example #15
def test(model_name,
         iter_num,
         gpu_id,
         n_test,
         filename,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16]):
    """
    test

    nf_enc and nf_dec
    #nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """
    start_time = time.time()
    gpu = '/gpu:' + str(gpu_id)
    print(gpu)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1, ) + atlas_vol.shape + (1, ))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('../models/' + model_name + '/' + str(iter_num) +
                         '.h5')

    seg_path = '../models/seg_pretrained/0.h5'
    feature_model, num_features = networks.segmenter_feature_model(seg_path)

    with open('seg_feature_stats.txt', 'rb') as file:
        feature_stats = pickle.loads(
            file.read())  # use `pickle.loads` to do the reverse

    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

    percentile = 99
    dice_means = []

    results = {}

    for step in range(0, n_test):

        res = {}

        vol_name, seg_name = test_brain_strings[step].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        with tf.device(gpu):
            pred = net.predict([X_vol, atlas_vol])
            warped_image = np.transpose(pred[0][0, :, :, :, :], (2, 0, 1, 3))
            pred_ac_features = feature_model.predict([warped_image])
            orig_ac_features = feature_model.predict(
                [np.transpose(X_vol[0, :, :, :, :], (2, 0, 1, 3))])

        # Warp segments with flow
        flow = pred[1][0, :, :, :, :]
        sample = flow + grid
        sample = np.stack(
            (sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
        warp_seg = interpn((yy, xx, zz),
                           X_seg[0, :, :, :, 0],
                           sample,
                           method='nearest',
                           bounds_error=False,
                           fill_value=0)

        vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
        # print(np.mean(vals), np.std(vals))
        mean = np.mean(vals)
        std = np.std(vals)

        res['dice_mean'] = mean
        res['dice_std'] = std

        for i in range(len(pred_ac_features)):
            normalized_pred = normalize_percentile(pred_ac_features[i],
                                                   percentile,
                                                   feature_stats,
                                                   i,
                                                   twod=True)
            normalized_orig = normalize_percentile(orig_ac_features[i],
                                                   percentile,
                                                   feature_stats,
                                                   i,
                                                   twod=True)

            for j in range(normalized_pred.shape[-1]):
                pred_feature = normalized_pred[:, :, :, j]
                orig_feature = normalized_orig[:, :, :, j]
                append_to_dict(res, 'l1_diff',
                               np.mean(np.abs(pred_feature - orig_feature)))
                append_to_dict(res, 'l2_diff',
                               np.mean(np.square(pred_feature - orig_feature)))

                append_to_dict(res, 'pred_mean', np.mean(pred_feature))
                append_to_dict(res, 'pred_std', np.std(pred_feature))
                append_to_dict(res, 'pred_99pc',
                               np.percentile(pred_feature, 99))
                append_to_dict(res, 'pred_1pc', np.percentile(pred_feature, 1))

                append_to_dict(res, 'orig_mean', np.mean(orig_feature))
                append_to_dict(res, 'orig_std', np.std(orig_feature))
                append_to_dict(res, 'orig_99pc',
                               np.percentile(orig_feature, 99))
                append_to_dict(res, 'orig_1pc', np.percentile(orig_feature, 1))

        dice_means.append(mean)

        results[vol_name] = res
        print(step, mean, std)
        print('time:', time.time() - start_time)

    print('average dice:', np.mean(dice_means))
    print('time taken:', time.time() - start_time)
    # for key, value in results.items():
    #     print(key)
    #     print(value)

    with open(filename, 'wb') as file:
        file.write(
            pickle.dumps(results))  # use `pickle.loads` to do the reverse
Example #16
def test(
        gpu_id,
        iter_num,
        compute_type='GPU',  # GPU or CPU
        vol_size=(160, 192, 224),
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 16, 3],
        save_file=None):
    """
    test by segmentation, compute dice between atlas_seg and warp_seg
    :param gpu_id: gpu id
    :param iter_num: specify the model to read
    :param compute_type: CPU/GPU
    :param vol_size: volume size
    :param nf_enc: number of encoder
    :param nf_dec: number of decoder
    :param save_file: None
    :return: None
    """

    # GPU handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        # if testing miccai run, should be xy indexing.
        net = networks.miccai2018_net(vol_size,
                                      nf_enc,
                                      nf_dec,
                                      use_miccai_int=True,
                                      indexing='xy')
        model_dir = "/home/ys895/rigid_diff_model/"
        net.load_weights(os.path.join(model_dir, str(iter_num) + '.h5'))

        # compose diffeomorphic flow output model
        diff_net = keras.models.Model(net.inputs,
                                      net.get_layer('diffflow').output)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size)

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # prepare a matrix of dice values
    dice_vals = np.zeros((len(good_labels), n_batches))
    for k in range(n_batches):
        # get data
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)
        orig_vol = X_vol
        orig_seg = X_seg

        theta = 0
        beta = 5
        omega = 0
        X_seg = rotate_img(X_seg[0, :, :, :, 0],
                           theta=theta,
                           beta=beta,
                           omega=omega)
        X_vol = rotate_img(X_vol[0, :, :, :, 0],
                           theta=theta,
                           beta=beta,
                           omega=omega)
        X_seg = X_seg.reshape((1, ) + X_seg.shape + (1, ))
        X_vol = X_vol.reshape((1, ) + X_vol.shape + (1, ))

        sample_num = 30
        grid_dimension = 4

        # predict transform
        with tf.device(gpu):
            pred = diff_net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        if compute_type == 'CPU':
            flow = pred[0, :, :, :, :]
            warp_seg = util.warp_seg(X_seg,
                                     flow,
                                     grid=grid,
                                     xx=xx,
                                     yy=yy,
                                     zz=zz)
        else:  # GPU

            flow = pred[0, :, :, :, :]

            # sample coordinate(sample_num * sample_num * sample_num)
            x = np.linspace(0, (vol_size[0] / sample_num) * (sample_num - 1),
                            sample_num)
            x = x.astype(np.int32)
            y = np.linspace(0, (vol_size[1] / sample_num) * (sample_num - 1),
                            sample_num)
            y = y.astype(np.int32)
            z = np.linspace(0, (vol_size[2] / sample_num) * (sample_num - 1),
                            sample_num)
            z = z.astype(np.int32)
            index = np.rollaxis(np.array(np.meshgrid(y, x, z)), 0, 4)
            x = index[:, :, :, 1]
            y = index[:, :, :, 0]
            z = index[:, :, :, 2]

            # Y in formula
            x_flow = np.arange(vol_size[0])
            y_flow = np.arange(vol_size[1])
            z_flow = np.arange(vol_size[2])
            grid = np.rollaxis(np.array((np.meshgrid(y_flow, x_flow, z_flow))),
                               0, 4)  # original coordinate
            grid_x = grid_sample(x, y, z, grid[:, :, :, 1], sample_num)
            grid_y = grid_sample(x, y, z, grid[:, :, :, 0], sample_num)
            grid_z = grid_sample(x, y, z, grid[:, :, :, 2],
                                 sample_num)  # X (10,10,10)

            sample = flow + grid
            sample_x = grid_sample(x, y, z, sample[:, :, :, 1], sample_num)
            sample_y = grid_sample(x, y, z, sample[:, :, :, 0], sample_num)
            sample_z = grid_sample(x, y, z, sample[:, :, :, 2],
                                   sample_num)  # Y (10,10,10)

            sum_x = np.sum(flow[:, :, :, 1])
            sum_y = np.sum(flow[:, :, :, 0])
            sum_z = np.sum(flow[:, :, :, 2])

            ave_x = sum_x / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_y = sum_y / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_z = sum_z / (vol_size[0] * vol_size[1] * vol_size[2])

            # formula
            Y = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
            X = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
            T = np.array([ave_x, ave_y, ave_z, 1])  # (4,1)
            print(T)

            for i in np.arange(sample_num):
                for j in np.arange(sample_num):
                    for z in np.arange(sample_num):
                        Y[i, j, z, :] = np.array([
                            sample_x[i, j, z], sample_y[i, j, z],
                            sample_z[i, j, z], 1
                        ])
                        #Y[i, j, z, :] = Y[i, j, z, :] - np.array([ave_x, ave_y, ave_z, 0])  # amend: Y` = Y - T

            for i in np.arange(sample_num):
                for j in np.arange(sample_num):
                    for z in np.arange(sample_num):
                        X[i, j, z, :] = np.array([
                            grid_x[i, j, z], grid_y[i, j, z], grid_z[i, j, z],
                            1
                        ])

            X = X.reshape(
                (sample_num * sample_num * sample_num, grid_dimension))
            Y = Y.reshape(
                (sample_num * sample_num * sample_num, grid_dimension))
            R = np.dot(
                np.dot(np.linalg.pinv(np.dot(np.transpose(X), X)),
                       np.transpose(X)), Y)  # R(4, 4)
            print(R)
            beta = -(beta / 180) * math.pi
            R = np.array([[math.cos(beta), 0, -math.sin(beta), 0],
                          [0, 1, 0, 0], [math.sin(beta), 0,
                                         math.cos(beta), 0], [0, 0, 0, 1]])
            #R = R.transpose()

            # build new grid(Use R to do the spatial transform)
            shifted_x = np.arange(vol_size[0])
            shifted_y = np.arange(vol_size[1])
            shifted_z = np.arange(vol_size[2])
            shifted_grid = np.rollaxis(
                np.array((np.meshgrid(shifted_y, shifted_x, shifted_z))), 0, 4)

            # translation matrices for rotating about the volume centre
            T1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                           [
                               -int(vol_size[0] / 2), -int(vol_size[1] / 2),
                               -int(vol_size[2] / 2), 1
                           ]])

            T2 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                           [
                               int(vol_size[0] / 2),
                               int(vol_size[1] / 2),
                               int(vol_size[2] / 2), 1
                           ]])

            for i in np.arange(vol_size[0]):
                for j in np.arange(vol_size[1]):
                    for z in np.arange(vol_size[2]):
                        #coordinates = np.dot(R, np.array([i, j, z, 1]).reshape(4, 1)) + T.reshape(4, 1)
                        coordinates = np.dot(
                            np.dot(
                                np.dot(
                                    np.array([i, j, z, 1]).reshape(1, 4), T1),
                                R), T2)  # new implementation
                        # print("voxel." + '(' + str(i) + ',' + str(j) + ',' + str(z) + ')')
                        shifted_grid[i, j, z, 1] = coordinates[0, 0]
                        shifted_grid[i, j, z, 0] = coordinates[0, 1]
                        shifted_grid[i, j, z, 2] = coordinates[0, 2]

            # interpolation
            xx = np.arange(vol_size[1])
            yy = np.arange(vol_size[0])
            zz = np.arange(vol_size[2])
            shifted_grid = np.stack(
                (shifted_grid[:, :, :, 1], shifted_grid[:, :, :, 0],
                 shifted_grid[:, :, :, 2]), 3
            )  # note: shifted_grid has x and y swapped; this swaps them back
            warp_seg = interpn((yy, xx, zz),
                               X_seg[0, :, :, :, 0],
                               shifted_grid,
                               method='nearest',
                               bounds_error=False,
                               fill_value=0)  # rigid registration
            warp_vol = interpn((yy, xx, zz),
                               X_vol[0, :, :, :, 0],
                               shifted_grid,
                               method='nearest',
                               bounds_error=False,
                               fill_value=0)  # rigid registration

        # compute Volume Overlap (Dice)
        dice_vals[:, k] = dice(warp_seg,
                               orig_seg[0, :, :, :, 0],
                               labels=good_labels)
        print('%3d %5.3f %5.3f' % (k, np.mean(
            dice_vals[:, k]), np.mean(np.mean(dice_vals[:, :k + 1]))))

        if save_file is not None:
            sio.savemat(save_file, {
                'dice_vals': dice_vals,
                'labels': good_labels
            })

        # specify slice
        num_slice = 90

        plt.figure()
        plt.subplot(1, 3, 1)
        plt.imshow(orig_vol[0, :, num_slice, :, 0])
        plt.subplot(1, 3, 2)
        plt.imshow(X_vol[0, :, num_slice, :, 0])
        plt.subplot(1, 3, 3)
        plt.imshow(warp_vol[:, num_slice, :])
        plt.savefig("slice" + str(num_slice) + '_' + str(k) + ".png")

        plt.figure()
        plt.subplot(1, 3, 1)
        plt.imshow(flow[:, num_slice, :, 1])
        plt.subplot(1, 3, 2)
        plt.imshow(flow[:, num_slice, :, 0])
        plt.subplot(1, 3, 3)
        plt.imshow(flow[:, num_slice, :, 2])
        plt.savefig("flow.png")
def test(iter_num, gpu_id, vol_size=(160,192,224), nf_enc=[16,32,32,32], nf_dec=[32,32,32,32,32,16,16,3]):
 gpu = '/gpu:' + str(gpu_id)

 # Anatomical labels we want to evaluate
 labels = sio.loadmat('../data/labels.mat')['labels'][0]

 # read atlas
 atlas_vol1, atlas_seg1 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990114_vc722.npz',
                                                              '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990114_vc722.npz')# [1,160,192,224,1]
 atlas_seg1 = atlas_seg1[0,:,:,:,0]# reduce the dimension to [160,192,224]

 atlas_vol2, atlas_seg2 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990210_vc792.npz',
                                                              '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990210_vc792.npz')
 atlas_seg2 = atlas_seg2[0, :, :, :, 0]

 #gpu = '/gpu:' + str(gpu_id)
 os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
 config = tf.ConfigProto()
 config.gpu_options.allow_growth = True
 config.allow_soft_placement = True
 set_session(tf.Session(config=config))

 # load weights of model
 with tf.device(gpu):
    net = networks.unet(vol_size, nf_enc, nf_dec)
    net.load_weights('/home/ys895/MAS2_Models/'+str(iter_num)+'.h5')
    #net.load_weights('../models/' + model_name + '/' + str(iter_num) + '.h5')

 xx = np.arange(vol_size[1])
 yy = np.arange(vol_size[0])
 zz = np.arange(vol_size[2])
 grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4) # (160, 192, 224, 3)
 #X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')
 X_vol1, X_seg1 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/981216_vc681.npz',
                                                      '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/981216_vc681.npz')

 X_vol2, X_seg2 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990205_vc783.npz',
                                                      '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990205_vc783.npz')

 X_vol3, X_seg3 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990525_vc1024.npz',
                                                     '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990525_vc1024.npz')

 X_vol4, X_seg4 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991025_vc1379.npz',
                                                      '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991025_vc1379.npz')

 X_vol5, X_seg5 = datagenerators.load_example_by_name('/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991122_vc1463.npz',
                                                     '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991122_vc1463.npz')

 # note: the atlas is used as the moving image here (inputs are [atlas, volume])
 # pred[0].shape (1, 160, 192, 224, 1)
 # pred[1].shape (1, 160, 192, 224, 3)
 # X1
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol1])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1+grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[ :, :, : ], sample1, method='nearest', bounds_error=False, fill_value=0) # (160, 192, 224)



 # label fusion: with a single atlas there is nothing to fuse,
 # so the warped segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg1[0, :, :, :, 0], labels=labels, nargout=2)
 mean1 = np.mean(vals)

 # X2
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol2])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1+grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[ :, :, : ], sample1, method='nearest', bounds_error=False, fill_value=0) # (160, 192, 224)



 # label fusion: with a single atlas there is nothing to fuse,
 # so the warped segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg2[0,:,:,:,0], labels=labels, nargout=2)
 mean2 = np.mean(vals)
 #print(np.mean(vals), np.std(vals))

 # X3
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol3])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1+grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[ :, :, : ], sample1, method='nearest', bounds_error=False, fill_value=0) # (160, 192, 224)



 # label fusion: with a single atlas there is nothing to fuse,
 # so the warped segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg3[0, :, :, :, 0], labels=labels, nargout=2)
 mean3 = np.mean(vals)

 # X4
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol4])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1+grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[ :, :, : ], sample1, method='nearest', bounds_error=False, fill_value=0) # (160, 192, 224)


 # label fusion: with a single atlas there is nothing to fuse,
 # so the warped segmentation is used directly
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg4[0, :, :, :, 0], labels=labels, nargout=2)
 mean4 = np.mean(vals)


 # X5
 with tf.device(gpu):
    pred1 = net.predict([atlas_vol1, X_vol5])

 # Warp segments with flow
 flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)

 sample1 = flow1+grid
 sample1 = np.stack((sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)

 warp_seg1 = interpn((yy, xx, zz), atlas_seg1[ :, :, : ], sample1, method='nearest', bounds_error=False, fill_value=0) # (160, 192, 224)


 # label fusion: with a single atlas there is nothing to fuse
 # (the commented-out stats.mode call in the original suggests a per-voxel
 # mode over multiple atlases was the intent)
 warp_seg = warp_seg1

 vals, _ = dice(warp_seg, X_seg5[0, :, :, :, 0], labels=labels, nargout=2)
 mean5 = np.mean(vals)

 # compute the mean Dice score over the five test subjects
 mean_dice = (mean1 + mean2 + mean3 + mean4 + mean5) / 5
 print(mean_dice)
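
With a single atlas, the label-fusion step above reduces to a plain copy. For the multi-atlas case that the commented-out stats.mode call hints at, a minimal majority-vote sketch over several warped label maps (warp_segs and labels are hypothetical inputs) could look like:

import numpy as np

def majority_vote(warp_segs, labels):
    # One vote per atlas for each label, then the arg-max label per voxel.
    votes = np.zeros(warp_segs[0].shape + (len(labels),))
    for seg in warp_segs:
        for i, l in enumerate(labels):
            votes[..., i] += (seg == l)
    return np.asarray(labels)[np.argmax(votes, axis=-1)]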
Exemplo n.º 18
0
def test(model_name,
         epoch,
         gpu_id,
         n_test,
         invert_images,
         max_clip,
         indexing,
         use_miccai,
         atlas_file,
         atlas_seg_file,
         normalize_atlas,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16]):
    start_time = time.time()
    good_labels = sio.loadmat('../data/labels.mat')['labels'][0]

    # setup
    gpu = '/gpu:' + str(gpu_id)
    #     print(gpu)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    restrict_GPU_tf(str(gpu_id))
    restrict_GPU_keras(str(gpu_id))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    atlas_vol = nib.load(atlas_file).get_data()[np.newaxis, ..., np.newaxis]
    atlas_seg = nib.load(atlas_seg_file).get_data()

    if normalize_atlas:
        atlas_vol = atlas_vol / np.max(atlas_vol) * max_clip

    sz = atlas_seg.shape
    z_inp1 = tf.placeholder(tf.float32, sz)
    z_inp2 = tf.placeholder(tf.float32, sz)
    z_out = losses.kdice(z_inp1, z_inp2, good_labels)
    kdice_fn = K.function([z_inp1, z_inp2], [z_out])

    # load weights of model
    with tf.device(gpu):
        if use_miccai:
            net = networks.miccai2018_net(vol_size, nf_enc, nf_dec)
            net.load_weights('../models/' + model_name + '/' + str(epoch) +
                             '.h5')
            trf_model = networks.trf_core(
                (vol_size[0] // 2, vol_size[1] // 2, vol_size[2] // 2),
                nb_feats=len(good_labels) + 1,
                indexing=indexing)

        else:
            net = networks.cvpr2018_net(vol_size, nf_enc, nf_dec)
            net.load_weights('../models/' + model_name + '/' + str(epoch) +
                             '.h5')
            trf_model = networks.trf_core(vol_size,
                                          nb_feats=len(good_labels) + 1,
                                          indexing=indexing)

    dice_means = []
    dice_stds = []

    for step in range(0, n_test):
        # get data
        if n_test == 1:
            X_vol = nib.load('../t1_atlas.nii').get_data()[np.newaxis, ...,
                                                           np.newaxis]
            X_seg = nib.load('../t1_atlas_seg.nii').get_data()[np.newaxis, ...,
                                                               np.newaxis]
        else:
            # NOTE: test_brain_strings is assumed to be loaded beforehand,
            # e.g. from '../data/test_examples.txt' as in the other examples
            vol_name, seg_name = test_brain_strings[step].split(",")
            X_vol, X_seg = datagenerators.load_example_by_name(
                vol_name, seg_name)

        if invert_images:
            X_vol = max_clip - X_vol

        with tf.device(gpu):
            pred = net.predict([X_vol, atlas_vol])
            all_labels = np.unique(X_seg)
            for l in all_labels:
                if l not in good_labels:
                    X_seg[X_seg == l] = 0
            for i in range(len(good_labels)):
                X_seg[X_seg == good_labels[i]] = i + 1
            seg_onehot = tf.keras.utils.to_categorical(
                X_seg[0, :, :, :, 0], num_classes=len(good_labels) + 1)
            warp_seg_onehot = trf_model.predict(
                [seg_onehot[tf.newaxis, :, :, :, :], pred[1]])
            warp_seg = np.argmax(warp_seg_onehot[0, :, :, :, :], axis=3)

            warp_seg_correct = np.zeros(warp_seg.shape)
            for i in range(len(good_labels)):
                warp_seg_correct[warp_seg == i + 1] = good_labels[i]

            dice = kdice_fn([warp_seg_correct, atlas_seg])

            mean = np.mean(dice)
            std = np.std(dice)
            dice_means.append(mean)
            dice_stds.append(std)
            print(step, mean, std)

    print('average dice:', np.mean(dice_means))
    print('std over patients:', np.std(dice_means))
    print('average std over regions:', np.mean(dice_stds))
    print('time taken:', time.time() - start_time)
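
Exemplo n.º 18 remaps the evaluated labels to contiguous indices 1..K before one-hot encoding, then maps the argmax back to label values. A self-contained sketch of that round-trip; writing into a fresh array avoids collisions when a raw label value overlaps the 1..K index range, a subtle risk of the in-place remap above (both helper names are hypothetical):

import numpy as np

def remap_labels(seg, good_labels):
    # Labels outside good_labels stay 0; the rest map to 1..K.
    compact = np.zeros_like(seg)
    for i, l in enumerate(good_labels):
        compact[seg == l] = i + 1
    return compact

def restore_labels(compact, good_labels):
    # Inverse mapping: contiguous index i+1 back to the original label.
    restored = np.zeros_like(compact)
    for i, l in enumerate(good_labels):
        restored[compact == i + 1] = l
    return restored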
Exemplo n.º 19
0
def test(
        model_name,
        gpu_id,
        compute_type='GPU',  # GPU or CPU
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 32, 16, 16]):
    """
    test

    nf_enc and nf_dec
    #nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol'][np.newaxis, ..., np.newaxis]
    atlas_seg = atlas['seg']
    vol_size = atlas_vol.shape[1:-1]

    # gpu handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.cvpr2018_net(vol_size, nf_enc, nf_dec)
        net.load_weights(model_name)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size, indexing='ij')

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # load subject test
    X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz',
                                                       '../data/test_seg.npz')

    with tf.device(gpu):
        pred = net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        if compute_type == 'CPU':
            flow = pred[1][0, :, :, :, :]
            warp_seg = util.warp_seg(X_seg,
                                     flow,
                                     grid=grid,
                                     xx=xx,
                                     yy=yy,
                                     zz=zz)

        else:  # GPU
            warp_seg = nn_trf_model.predict([X_seg, pred[1]])[0, ..., 0]

    vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
    dice_mean = np.mean(vals)
    dice_std = np.std(vals)
    print('Dice mean over structures: {:.2f} ({:.2f})'.format(
        dice_mean, dice_std))
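
Every example in this listing scores registration with a per-label Dice overlap through a dice(...) helper whose implementation is not shown. A minimal sketch matching the call signature used above (an assumption about the helper, not the actual library code):

import numpy as np

def dice(seg1, seg2, labels, nargout=1):
    # Dice overlap per label: 2*|A & B| / (|A| + |B|).
    vals = np.zeros(len(labels))
    for i, l in enumerate(labels):
        a = (seg1 == l)
        b = (seg2 == l)
        denom = a.sum() + b.sum()
        vals[i] = 2.0 * np.logical_and(a, b).sum() / denom if denom > 0 else 0.0
    if nargout == 1:
        return vals
    return vals, labels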
Exemplo n.º 20
0
def test(model_name,
         gpu_id,
         iter_num,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 8, 8, 3]):
    """
	test

	nf_enc and nf_dec
	#nf_dec = [32,32,32,32,32,16,16,3]
    # This needs to be changed. Ideally, we could just call load_model, and we wont have to
    # specify the # of channels here, but the load_model is not working with the custom loss...
    """

    gpu = '/gpu:' + str(gpu_id)

    # Test file and anatomical labels we want to evaluate
    test_brain_file = open('../data/test_examples.txt')
    test_brain_strings = test_brain_file.readlines()
    test_brain_strings = [x.strip() for x in test_brain_strings]
    good_labels = sio.loadmat('../data/test_labels.mat')['labels'][0]

    atlas = np.load('../data/atlas_norm.npz')
    atlas_vol = atlas['vol']
    atlas_seg = atlas['seg']
    atlas_vol = np.reshape(atlas_vol, (1, ) + atlas_vol.shape + (1, ))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('../models/' + model_name + '/' + str(iter_num) +
                         '.h5')

    n_batches = len(test_brain_strings)
    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)

    dice_vals = np.zeros((len(good_labels), n_batches))

    np.random.seed(17)

    for k in range(0, n_batches):
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        with tf.device(gpu):
            pred = net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        flow = pred[1][0, :, :, :, :]
        sample = flow + grid
        sample = np.stack(
            (sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
        warp_seg = interpn((yy, xx, zz),
                           X_seg[0, :, :, :, 0],
                           sample,
                           method='nearest',
                           bounds_error=False,
                           fill_value=0)

        vals, labels = dice(warp_seg, atlas_seg, labels=good_labels, nargout=2)
        dice_vals[:, k] = vals
        print(np.mean(dice_vals[:, k]))
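
The recurring np.stack((sample[..., 1], sample[..., 0], sample[..., 2]), 3) swap exists because np.meshgrid defaults to 'xy' indexing, which transposes the first two coordinate components. A sketch of the same nearest-neighbour warp built with indexing='ij', which makes the swap unnecessary (assuming the flow is stored in array-index order):

import numpy as np
from scipy.interpolate import interpn

def warp_nearest(vol, flow, vol_size):
    # Identity grid directly in array-index ('ij') order: (X, Y, Z, 3).
    axes = [np.arange(s) for s in vol_size]
    grid = np.stack(np.meshgrid(*axes, indexing='ij'), axis=-1)
    sample = grid + flow  # absolute sampling location for every voxel
    return interpn(tuple(axes), vol, sample, method='nearest',
                   bounds_error=False, fill_value=0)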
Exemplo n.º 21
0
def test(iter_num,
         gpu_id,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16, 3]):
    gpu = '/gpu:' + str(gpu_id)

    # Anatomical labels we want to evaluate
    labels = sio.loadmat('../data/labels.mat')['labels'][0]

    # read atlas
    atlas_vol1, atlas_seg1 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990114_vc722.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990114_vc722.npz'
    )  # [1,160,192,224,1]
    atlas_seg1 = atlas_seg1[0, :, :, :,
                            0]  # reduce the dimension to [160,192,224]
    atlas_seg1 = keras.utils.to_categorical(atlas_seg1)
    atlas_vol2, atlas_seg2 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990210_vc792.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990210_vc792.npz'
    )
    atlas_seg2 = atlas_seg2[0, :, :, :, 0]
    atlas_seg2 = keras.utils.to_categorical(atlas_seg2)
    atlas_vol3, atlas_seg3 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990405_vc922.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990405_vc922.npz'
    )
    atlas_seg3 = atlas_seg3[0, :, :, :, 0]
    atlas_seg3 = keras.utils.to_categorical(atlas_seg3)
    atlas_vol4, atlas_seg4 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991006_vc1337.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991006_vc1337.npz'
    )
    atlas_seg4 = atlas_seg4[0, :, :, :, 0]
    atlas_seg4 = keras.utils.to_categorical(atlas_seg4)
    atlas_vol5, atlas_seg5 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991120_vc1456.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991120_vc1456.npz'
    )
    atlas_seg5 = atlas_seg5[0, :, :, :, 0]
    atlas_seg5 = keras.utils.to_categorical(atlas_seg5)
    #atlas = np.load('../data/atlas_norm.npz')
    #atlas_vol = atlas['vol']
    #print('the size of atlas:')
    #print(atlas_vol.shape)
    #atlas_seg = atlas['seg']
    #atlas_vol = np.reshape(atlas_vol, (1,)+atlas_vol.shape+(1,))

    #gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        net = networks.unet(vol_size, nf_enc, nf_dec)
        net.load_weights('/home/ys895/MAS_Models/' + str(iter_num) + '.h5')
        #net.load_weights('../models/' + model_name + '/' + str(iter_num) + '.h5')

    xx = np.arange(vol_size[1])
    yy = np.arange(vol_size[0])
    zz = np.arange(vol_size[2])
    grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0,
                       4)  # (160, 192, 224, 3)
    #X_vol, X_seg = datagenerators.load_example_by_name('../data/test_vol.npz', '../data/test_seg.npz')
    X_vol1, X_seg1 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/981216_vc681.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/981216_vc681.npz'
    )

    X_vol2, X_seg2 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990205_vc783.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990205_vc783.npz'
    )

    X_vol3, X_seg3 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/990525_vc1024.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/990525_vc1024.npz'
    )

    X_vol4, X_seg4 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991025_vc1379.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991025_vc1379.npz'
    )

    X_vol5, X_seg5 = datagenerators.load_example_by_name(
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/vols/991122_vc1463.npz',
        '/home/ys895/resize256/resize256-crop_x32/FromEugenio_prep/labels/991122_vc1463.npz'
    )

    # note: the atlas is used as the moving image here (inputs are [atlas, volume])
    # pred[0].shape (1, 160, 192, 224, 1)
    # pred[1].shape (1, 160, 192, 224, 3)
    # X1
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol1])
        pred2 = net.predict([atlas_vol2, X_vol1])
        pred3 = net.predict([atlas_vol3, X_vol1])
        pred4 = net.predict([atlas_vol4, X_vol1])
        pred5 = net.predict([atlas_vol5, X_vol1])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224, n_labels)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the warped one-hot segmentations, then take
    # the arg-max label per voxel
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg1[0, :, :, :, 0], labels=labels, nargout=2)
    mean1 = np.mean(vals)
    var1 = np.std(vals)

    # X2
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol2])
        pred2 = net.predict([atlas_vol2, X_vol2])
        pred3 = net.predict([atlas_vol3, X_vol2])
        pred4 = net.predict([atlas_vol4, X_vol2])
        pred5 = net.predict([atlas_vol5, X_vol2])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)  # (160, 192, 224, n_labels)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the warped one-hot segmentations, then take
    # the arg-max label per voxel
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg2[0, :, :, :, 0], labels=labels, nargout=2)
    mean2 = np.mean(vals)
    var2 = np.std(vals)
    #print(np.mean(vals), np.std(vals))

    # X3
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol3])
        pred2 = net.predict([atlas_vol2, X_vol3])
        pred3 = net.predict([atlas_vol3, X_vol3])
        pred4 = net.predict([atlas_vol4, X_vol3])
        pred5 = net.predict([atlas_vol5, X_vol3])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the warped one-hot segmentations, then take
    # the arg-max label per voxel
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg3[0, :, :, :, 0], labels=labels, nargout=2)
    mean3 = np.mean(vals)
    var3 = np.std(vals)

    # X4
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol4])
        pred2 = net.predict([atlas_vol2, X_vol4])
        pred3 = net.predict([atlas_vol3, X_vol4])
        pred4 = net.predict([atlas_vol4, X_vol4])
        pred5 = net.predict([atlas_vol5, X_vol4])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the warped one-hot segmentations, then take
    # the arg-max label per voxel
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg4[0, :, :, :, 0], labels=labels, nargout=2)
    mean4 = np.mean(vals)
    var4 = np.std(vals)
    # X5
    with tf.device(gpu):
        pred1 = net.predict([atlas_vol1, X_vol5])
        pred2 = net.predict([atlas_vol2, X_vol5])
        pred3 = net.predict([atlas_vol3, X_vol5])
        pred4 = net.predict([atlas_vol4, X_vol5])
        pred5 = net.predict([atlas_vol5, X_vol5])
    # Warp segments with flow
    flow1 = pred1[1][0, :, :, :, :]  # (160, 192, 224, 3)
    flow2 = pred2[1][0, :, :, :, :]
    flow3 = pred3[1][0, :, :, :, :]
    flow4 = pred4[1][0, :, :, :, :]
    flow5 = pred5[1][0, :, :, :, :]

    sample1 = flow1 + grid
    sample1 = np.stack(
        (sample1[:, :, :, 1], sample1[:, :, :, 0], sample1[:, :, :, 2]), 3)
    sample2 = flow2 + grid
    sample2 = np.stack(
        (sample2[:, :, :, 1], sample2[:, :, :, 0], sample2[:, :, :, 2]), 3)
    sample3 = flow3 + grid
    sample3 = np.stack(
        (sample3[:, :, :, 1], sample3[:, :, :, 0], sample3[:, :, :, 2]), 3)
    sample4 = flow4 + grid
    sample4 = np.stack(
        (sample4[:, :, :, 1], sample4[:, :, :, 0], sample4[:, :, :, 2]), 3)
    sample5 = flow5 + grid
    sample5 = np.stack(
        (sample5[:, :, :, 1], sample5[:, :, :, 0], sample5[:, :, :, 2]), 3)

    warp_seg1 = interpn((yy, xx, zz),
                        atlas_seg1[:, :, :, :],
                        sample1,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg2 = interpn((yy, xx, zz),
                        atlas_seg2[:, :, :, :],
                        sample2,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg3 = interpn((yy, xx, zz),
                        atlas_seg3[:, :, :, :],
                        sample3,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg4 = interpn((yy, xx, zz),
                        atlas_seg4[:, :, :, :],
                        sample4,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)
    warp_seg5 = interpn((yy, xx, zz),
                        atlas_seg5[:, :, :, :],
                        sample5,
                        method='linear',
                        bounds_error=False,
                        fill_value=0)

    # label fusion: average the warped one-hot segmentations, then take
    # the arg-max label per voxel
    warp_seg = (warp_seg1 + warp_seg2 + warp_seg3 + warp_seg4 + warp_seg5) / 5
    warp_seg = np.argmax(warp_seg, axis=3)

    vals, _ = dice(warp_seg, X_seg5[0, :, :, :, 0], labels=labels, nargout=2)
    mean5 = np.mean(vals)
    var5 = np.std(vals)

    # compute the mean Dice score (and mean per-subject std) over the five subjects
    mean_dice = (mean1 + mean2 + mean3 + mean4 + mean5) / 5
    mean_std = (var1 + var2 + var3 + var4 + var5) / 5
    print(str(mean_dice) + ',' + str(mean_std))
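
The five near-identical blocks above (predict, resample, average, arg-max) differ only in the moving test volume. A condensed sketch of the same multi-atlas fusion as one helper, assuming the net, grid, xx/yy/zz axes, labels and dice defined earlier in this listing (multi_atlas_dice is a hypothetical name):

def multi_atlas_dice(net, atlas_vols, atlas_segs_onehot, X_vol, X_seg):
    fused = 0
    for atlas_vol, atlas_seg in zip(atlas_vols, atlas_segs_onehot):
        pred = net.predict([atlas_vol, X_vol])
        flow = pred[1][0, :, :, :, :]  # (160, 192, 224, 3)
        sample = flow + grid
        sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0],
                           sample[:, :, :, 2]), 3)
        fused = fused + interpn((yy, xx, zz), atlas_seg, sample,
                                method='linear', bounds_error=False,
                                fill_value=0)
    warp_seg = np.argmax(fused / len(atlas_vols), axis=3)
    vals, _ = dice(warp_seg, X_seg[0, :, :, :, 0], labels=labels, nargout=2)
    return np.mean(vals), np.std(vals)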
Exemplo n.º 22
0
def test(data_dir, fixed_image, label, device, load_model_file, DLR_model):

    assert DLR_model in [
        'VM', 'FAIM'
    ], 'DLR_model should be one of VM or FAIM, found %s' % DLR_model

    # prepare data files
    # inside the folder are npz files with the 'vol' and 'label'.
    test_vol_names = glob.glob(os.path.join(data_dir, '*.npz'))
    assert len(test_vol_names) > 0, "Could not find any testing data"

    fixed_vol = np.load(fixed_image)['vol'][np.newaxis, ..., np.newaxis]
    fixed_seg = np.load(fixed_image)['label']
    vol_size = fixed_vol.shape[1:-1]
    label = np.load(label)

    # device handling
    if 'gpu' in device:
        if '0' in device:
            device = '/gpu:0'
        if '1' in device:
            device = '/gpu:1'
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        set_session(tf.Session(config=config))
    else:
        device = '/cpu:0'

    # load weights of model
    with tf.device(device):
        net = networks.AAN(vol_size, DLR_model)
        net.load_weights(load_model_file)

        # NN transfer model
        nn_trf_model_nearest = networks.nn_trf(vol_size,
                                               interp_method='nearest',
                                               indexing='ij')
        nn_trf_model_linear = networks.nn_trf(vol_size,
                                              interp_method='linear',
                                              indexing='ij')

    dice_result = []
    for test_image in test_vol_names:

        X_vol, X_seg, x_boundary = datagenerators.load_example_by_name(
            test_image, return_boundary=True)

        with tf.device(device):
            pred = net.predict([X_vol, fixed_vol, x_boundary])
            warp_vol = nn_trf_model_linear.predict([X_vol, pred[1]])[0, ..., 0]
            warp_seg = nn_trf_model_nearest.predict([X_seg, pred[1]])[0, ..., 0]

        vals, _ = dice(warp_seg, fixed_seg, label, nargout=2)
        dice_result.append(vals)

        print('Dice mean: {:.3f} ({:.3f})'.format(np.mean(vals), np.std(vals)))

    dice_result = np.array(dice_result)
    print('Average dice mean: {:.3f} ({:.3f})'.format(np.mean(dice_result),
                                                      np.std(dice_result)))
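
The device handling in Exemplo n.º 22 only recognizes GPUs 0 and 1. A slightly more general sketch of the same TF1-era setup (select_device is a hypothetical helper):

import re
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session

def select_device(device):
    # Accept 'gpu0', 'gpu:1', ... and fall back to the CPU otherwise.
    m = re.search(r'gpu:?(\d+)', device)
    if m:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        set_session(tf.Session(config=config))
        return '/gpu:' + m.group(1)
    return '/cpu:0'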