Example #1
def getnet():
    model_file = 'C:/Users/cisguest/Downloads/voxelmorph-master/voxelmorph-master/my_models/1500.h5'
    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]
    #    atlas_vol = np.load(fixed)['vol'][np.newaxis, ..., np.newaxis]
    #    vol_size = atlas_vol.shape[1:-1]
    vol_size = (480, 640)
    bidir = 0
    net = networks.miccai2018_net(vol_size, nf_enc, nf_dec, bidir=bidir)
    net.load_weights(model_file)
    return net


#def main():
#    net = getnet()
#    mapImages('C:/Users/cisguest/Downloads/iris/dark iris/301.png','C:/Users/cisguest/Downloads/iris/dark iris/301.png',net)
#
#if __name__ == "__main__":
#    main()
#%%
#[moved, warp] = net.predict([movIm, fixIm])
#plt.figure(figsize=(10,5),dpi=250)
#plt.subplot(221)
#plt.imshow(warp[0,:,:,0])
#plt.colorbar()
#plt.subplot(222)
#plt.imshow(warp[0,:,:,1])
#plt.colorbar()
#plt.subplot(223)
#plt.imshow(warp[0,:,:,2])
#plt.colorbar()
#plt.subplot(224)
#plt.imshow(warp[0,:,:,3])
#plt.colorbar()
#
##%%
#
#plt.figure(figsize=(10,5),dpi=250)
#plt.subplot(211)
#plt.imshow(np.sqrt(((warp[0,:,:,0])**2+(warp[0,:,:,1])**2)))
#plt.colorbar()
#plt.title('Mag')
#plt.subplot(212)
#plt.imshow(np.arctan2(warp[0,:,:,0],warp[0,:,:,1]))
#plt.colorbar()
#plt.title('Phase')
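
A minimal sketch of using getnet(), reconstructing the commented-out block above. The image paths are placeholders, and it assumes 480x640 grayscale inputs and that the voxelmorph networks module is importable:

import numpy as np
import imageio
import matplotlib.pyplot as plt

net = getnet()

# load the moving and fixed images, scale to [0, 1], add batch/channel axes
movIm = imageio.imread('moving.png').astype('float32') / 255.0
fixIm = imageio.imread('fixed.png').astype('float32') / 255.0
movIm = movIm[np.newaxis, ..., np.newaxis]
fixIm = fixIm[np.newaxis, ..., np.newaxis]

# the model returns the warped moving image and the flow/velocity statistics
moved, warp = net.predict([movIm, fixIm])

# magnitude and phase of the first two flow channels, as in the block above
plt.figure(figsize=(10, 5), dpi=250)
plt.subplot(211)
plt.imshow(np.sqrt(warp[0, :, :, 0]**2 + warp[0, :, :, 1]**2))
plt.colorbar()
plt.title('Mag')
plt.subplot(212)
plt.imshow(np.arctan2(warp[0, :, :, 0], warp[0, :, :, 1]))
plt.colorbar()
plt.title('Phase')
plt.show()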
Example #2
def train(data_dir,
          atlas_file,
          model_dir,
          gpu_id,
          lr,
          nb_epochs,
          prior_lambda,
          image_sigma,
          steps_per_epoch,
          batch_size,
          load_model_file,
          bidir,
          initial_epoch=0):
    """
    model training function
    :param data_dir: folder with npz files for each subject.
    :param atlas_file: atlas filename. So far we support npz file with a 'vol' variable
    :param model_dir: model folder to save to
    :param gpu_id: gpu id string (e.g. '0' or '0,1'); set as CUDA_VISIBLE_DEVICES
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param prior_lambda: the prior_lambda, the scalar in front of the smoothing laplacian, in MICCAI paper
    :param image_sigma: the image sigma in MICCAI paper
    :param steps_per_epoch: number of gradient steps per epoch (a checkpoint is saved each epoch)
    :param batch_size: optional (default 1); can be larger, depending on GPU memory and volume size
    :param load_model_file: optional h5 model file to initialize with
    :param bidir: logical; whether to use the bidirectional cost function
    """

    # load atlas from provided files. The atlas we used is 160x192x224.
    atlas_vol = np.load(atlas_file)['vol'][np.newaxis, ..., np.newaxis]
    vol_size = atlas_vol.shape[1:-1]
    # prepare data files
    # for the CVPR and MICCAI papers, we have data arranged in train/validate/test folders
    # inside each folder is a /vols/ and a /asegs/ folder with the volumes
    # and segmentations. All of our papers use npz-formatted data.
    train_vol_names = glob.glob(os.path.join(data_dir, '*.npz'))
    random.shuffle(train_vol_names)  # shuffle volume list
    assert len(train_vol_names) > 0, "Could not find any training data"

    # Diffeomorphic network architecture used in MICCAI 2018 paper
    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # gpu handling
    gpu = '/gpu:%d' % 0  # after CUDA_VISIBLE_DEVICES masking, the visible gpu is device 0
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        # the MICCAI 2018 model takes in [image_1, image_2] and outputs [warped_image_1, velocity_stats]
        # in these experiments, we use image_2 as atlas
        model = networks.miccai2018_net(vol_size, nf_enc, nf_dec, bidir=bidir)

        # load initial weights
        if load_model_file is not None and load_model_file != "":
            model.load_weights(load_model_file)

        # save first iteration
        model.save(os.path.join(model_dir, '%02d.h5' % initial_epoch))

        # compile
        # note: better to supply vol_shape here than to let tf infer it.
        flow_vol_shape = model.outputs[-1].shape[1:-1]
        loss_class = losses.Miccai2018(image_sigma,
                                       prior_lambda,
                                       flow_vol_shape=flow_vol_shape)
        if bidir:
            model_losses = [
                loss_class.recon_loss, loss_class.recon_loss,
                loss_class.kl_loss
            ]
            loss_weights = [0.5, 0.5, 1]
        else:
            model_losses = [loss_class.recon_loss, loss_class.kl_loss]
            loss_weights = [1, 1]

    # data generator
    nb_gpus = len(gpu_id.split(','))
    assert np.mod(batch_size, nb_gpus) == 0, \
        'batch_size should be a multiple of the nr. of gpus. ' + \
        'Got batch_size %d, %d gpus' % (batch_size, nb_gpus)

    train_example_gen = datagenerators.example_gen(train_vol_names,
                                                   batch_size=batch_size)
    atlas_vol_bs = np.repeat(atlas_vol, batch_size, axis=0)
    miccai2018_gen = datagenerators.miccai2018_gen(train_example_gen,
                                                   atlas_vol_bs,
                                                   batch_size=batch_size,
                                                   bidir=bidir)

    # prepare callbacks
    save_file_name = os.path.join(model_dir, '{epoch:02d}.h5')

    # fit generator
    with tf.device(gpu):

        # multi-gpu support
        if nb_gpus > 1:
            save_callback = nrn_gen.ModelCheckpointParallel(save_file_name)
            mg_model = multi_gpu_model(model, gpus=nb_gpus)

        # single gpu
        else:
            save_callback = ModelCheckpoint(save_file_name)
            mg_model = model

        mg_model.compile(optimizer=Adam(lr=lr),
                         loss=model_losses,
                         loss_weights=loss_weights)
        mg_model.fit_generator(miccai2018_gen,
                               initial_epoch=initial_epoch,
                               epochs=nb_epochs,
                               callbacks=[save_callback],
                               steps_per_epoch=steps_per_epoch,
                               verbose=1)
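
A hypothetical invocation (paths and hyperparameters are illustrative; note gpu_id must be a string here, since the code calls gpu_id.split(',')):

train(data_dir='data/train',
      atlas_file='data/atlas_norm.npz',
      model_dir='models/miccai2018',
      gpu_id='0',
      lr=1e-4,
      nb_epochs=1500,
      prior_lambda=10,
      image_sigma=0.02,
      steps_per_epoch=100,
      batch_size=1,
      load_model_file=None,
      bidir=0)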
Example #3
def test(
        gpu_id,
        model_dir,
        iter_num,
        compute_type='GPU',  # GPU or CPU
        vol_size=(160, 192, 224),
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 16, 3],
        save_file=None):
    """
    test via segmetnation propagation
    works by iterating over some iamge files, registering them to atlas,
    propagating the warps, then computing Dice with atlas segmentations
    """

    # GPU handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        # note: miccai-trained runs originally used 'xy' indexing; this
        # configuration uses 'ij' with use_miccai_int=False
        net = networks.miccai2018_net(vol_size,
                                      nf_enc,
                                      nf_dec,
                                      use_miccai_int=False,
                                      indexing='ij')
        net.load_weights(os.path.join(model_dir, str(iter_num) + '.h5'))

        # compose diffeomorphic flow output model
        diff_net = keras.models.Model(net.inputs,
                                      net.get_layer('diffflow').output)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size, indexing='ij')

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # prepare a matrix of dice values
    dice_vals = np.zeros((len(good_labels), n_batches))
    for k in range(n_batches):
        # get data
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        # predict transform
        with tf.device(gpu):
            pred = diff_net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        if compute_type == 'CPU':
            flow = pred[0, :, :, :, :]
            warp_seg = util.warp_seg(X_seg,
                                     flow,
                                     grid=grid,
                                     xx=xx,
                                     yy=yy,
                                     zz=zz)

        else:  # GPU
            warp_seg = nn_trf_model.predict([X_seg, pred])[0, ..., 0]

        # compute Volume Overlap (Dice)
        dice_vals[:, k] = dice(warp_seg, atlas_seg, labels=good_labels)
        print('%3d %5.3f %5.3f' % (k, np.mean(
            dice_vals[:, k]), np.mean(np.mean(dice_vals[:, :k + 1]))))

        if save_file is not None:
            sio.savemat(save_file, {
                'dice_vals': dice_vals,
                'labels': good_labels
            })
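
This test relies on module-level globals the excerpt omits (atlas_vol, atlas_seg, good_labels, test_brain_strings, n_batches). A plausible setup, assuming an npz atlas with 'vol' and 'seg' variables (file names are placeholders):

import numpy as np

atlas = np.load('data/atlas_norm.npz')
atlas_vol = atlas['vol'][np.newaxis, ..., np.newaxis]
atlas_seg = atlas['seg']

good_labels = np.load('data/labels.npy')            # anatomical labels to score
with open('data/test_examples.txt') as f:
    test_brain_strings = f.read().splitlines()      # one "vol_path,seg_path" per line
n_batches = len(test_brain_strings)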
Example #4
def test(
        gpu_id,
        model_dir,
        iter_num,
        compute_type='GPU',  # GPU or CPU
        vol_size=(160, 192, 224),
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 16, 3],
        save_file=None):
    """
    test via segmetnation propagation
    works by iterating over some iamge files, registering them to atlas,
    propagating the warps, then computing Dice with atlas segmentations
    """

    # GPU handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        # note: miccai-trained runs originally used 'xy' indexing; this
        # configuration uses 'ij' with use_miccai_int=False
        net = networks.miccai2018_net(vol_size,
                                      nf_enc,
                                      nf_dec,
                                      use_miccai_int=False,
                                      indexing='ij')
        net.load_weights(os.path.join(model_dir, str(iter_num) + '.h5'))

        # compose diffeomorphic flow output model
        diff_net = keras.models.Model(net.inputs,
                                      net.get_layer('diffflow').output)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size, indexing='ij')

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # get data
    X_vol = nib.load(r'D:\users\zzx\data\2018nor\pre\01.nii').get_data()
    X_vol = X_vol[np.newaxis, ..., np.newaxis]
    atlas_vol = nib.load(r'D:\users\zzx\data\2018nor\pre\01_a.nii').get_data()
    atlas_vol = atlas_vol[np.newaxis, ..., np.newaxis]

    X_mask = nib.load(
        r'D:\users\zzx\data\2018mask\pre\pig01_pre_final_r.nii').get_data()
    X_mask = X_mask[np.newaxis, ..., np.newaxis]
    X_mask[X_mask == np.max(X_mask)] = 1
    X_mask[X_mask != 1] = 0
    atlas_mask = nib.load(
        r'D:\users\zzx\data\2018mask\aft\pig01_02_final_r.nii').get_data()
    atlas_mask[atlas_mask == np.max(atlas_mask)] = 1
    atlas_mask[atlas_mask != 1] = 0
    ## feature point
    # X_feapt = np.zeros((160,192,224))
    # X_feapt[128,51,165] = 1
    # X_feapt = X_feapt[np.newaxis,...,np.newaxis]

    # predict transform
    with tf.device(gpu):
        pred = diff_net.predict([X_vol, atlas_vol])

    # Warp segments with flow
    if compute_type == 'CPU':
        flow = pred[0, :, :, :, :]
        # renamed from warp_seg so the Dice computation below works; note this
        # CPU path never produces warp_vol, so the nib.save below assumes the GPU path
        warp_mask = util.warp_seg(X_mask, flow, grid=grid, xx=xx, yy=yy, zz=zz)

    else:  # GPU
        warp_mask = nn_trf_model.predict([X_mask, pred])[0, ..., 0]
        warp_vol = nn_trf_model.predict([X_vol, pred])[0, ..., 0]
        # pred_point1 = nn_trf_model.predict([X_feapt, pred])[0,...,0]
    print(X_vol.shape)
    warp_vol = nib.Nifti1Image(warp_vol, np.eye(4))
    nib.save(warp_vol, r'D:\users\zzx\data\2018warp\1w.nii')
    # compute Volume Overlap (Dice)
    # X_mask = X_mask[0,...,0]
    # print(X_mask.shape, atlas_mask.shape,pred_point1.shape,np.where(pred_point1 != 0))
    dice_vals = dice(warp_mask, atlas_mask)
    # print('%3d %5.3f %5.3f' % (k, np.mean(dice_vals[:, k]), np.mean(np.mean(dice_vals[:, :k+1]))))
    print(dice_vals)
    if save_file is not None:
        sio.savemat(save_file, {'dice_vals': dice_vals})
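
A hypothetical call (the model directory and iteration number are placeholders; the data paths are hardcoded inside the function):

test(gpu_id=0, model_dir='models/miccai2018', iter_num=1500)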
def train(data_dir,
          atlas_file,
          model_dir,
          gpu_id,
          lr,
          nb_epochs,
          prior_lambda,
          image_sigma,
          steps_per_epoch,
          batch_size,
          load_model_file,
          bidir,
          bool_cc,
          initial_epoch=0):
    """
    model training function
    :param data_dir: folder with npz files for each subject.
    :param atlas_file: atlas filename. So far we support npz file with a 'vol' variable
    :param model_dir: model folder to save to
    :param gpu_id: gpu id string; set as CUDA_VISIBLE_DEVICES
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param prior_lambda: the prior_lambda, the scalar in front of the smoothing laplacian, in MICCAI paper
    :param image_sigma: the image sigma in MICCAI paper
    :param steps_per_epoch: accepted but unused here; fit() runs on in-memory arrays
    :param batch_size: optional (default 1); note the fit call below hardcodes 8
    :param load_model_file: optional h5 model file to initialize with
    :param bidir: logical; whether to use the bidirectional cost function
    :param bool_cc: train the CC (CVPR) or the MICCAI version
    """

    # load atlas from provided files. The atlas we used is 160x192x224.
    #atlas_vol = np.load(atlas_file)['vol'][np.newaxis, ..., np.newaxis]
    vm_dir = '/home/jdram/voxelmorph/'
    base = np.load(
        os.path.join(vm_dir, "data",
                     "ts12_dan_a88_fin_o_trim_adpc_002661_256.npy"))
    monitor = np.load(
        os.path.join(vm_dir, "data",
                     "ts12_dan_a05_fin_o_trim_adpc_002682_256.npy"))
    #base    = np.load(os.path.join(vm_dir, "data","ts12_dan_a88_fin_o_trim_adpc_002661_abs.npy"))
    #monitor = np.load(os.path.join(vm_dir, "data","ts12_dan_a05_fin_o_trim_adpc_002682_abs.npy"))

    #vol_size = (64, 64, 64)
    vol_size = (64, 64, 256 - 64)
    #vol_size = (128, 128, 256)

    # prepare data files
    # for the CVPR and MICCAI papers, we have data arranged in train/validate/test folders
    # inside each folder is a /vols/ and a /asegs/ folder with the volumes
    # and segmentations. All of our papers use npz-formatted data.
    #train_vol_names = glob.glob(os.path.join(data_dir, '*.npy'))
    #random.shuffle(train_vol_names)  # shuffle volume list
    #assert len(train_vol_names) > 0, "Could not find any training data"

    # Diffeomorphic network architecture used in MICCAI 2018 paper
    nf_enc = [32, 64, 64, 64]
    nf_dec = [64, 64, 64, 64, 32, 3]

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
    tf.reset_default_graph()

    if bool_cc:
        pre_net = "cc_"
    else:
        if bidir:
            pre_net = "miccai_bidir_"
        else:
            pre_net = "miccai_"

    # gpu handling
    gpu = '/device:GPU:%d' % int(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        # prepare the model
        # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, flow]
        # in the experiments, we use image_2 as atlas
        if bool_cc:
            model = networks.cvpr2018_net(vol_size, nf_enc, nf_dec)
        else:
            model = networks.miccai2018_net(vol_size,
                                            nf_enc,
                                            nf_dec,
                                            bidir=bidir,
                                            vel_resize=.5)

        # load initial weights
        if load_model_file is not None and load_model_file != "":
            print('loading', load_model_file)
            model.load_weights(load_model_file)

        # save first iteration
        model.save(os.path.join(model_dir, f'{pre_net}{initial_epoch:02d}.h5'))
        model.summary()

        if bool_cc:
            model_losses = [losses.NCC().loss, losses.Grad('l2').loss]
            loss_weights = [1.0, 0.01]  # recommend 1.0 for ncc, 0.01 for mse
        else:
            flow_vol_shape = model.outputs[-1].shape[1:-1]
            loss_class = losses.Miccai2018(image_sigma,
                                           prior_lambda,
                                           flow_vol_shape=flow_vol_shape)
            if bidir:
                model_losses = [
                    loss_class.recon_loss, loss_class.recon_loss,
                    loss_class.kl_loss
                ]
                loss_weights = [0.5, 0.5, 1]
            else:
                model_losses = [loss_class.recon_loss, loss_class.kl_loss]
                loss_weights = [1, 1]

    # note: this generator is constructed but never used; training below calls
    # fit() directly on the in-memory arrays
    segy_gen = datagenerators.segy_gen(base, monitor, batch_size=batch_size)

    # prepare callbacks
    save_file_name = os.path.join(model_dir, pre_net + '{epoch:02d}.h5')

    with tf.device(gpu):
        # fit generator
        save_callback = ModelCheckpoint(save_file_name, period=5)
        csv_cb = CSVLogger(f'{pre_net}log.csv')
        nan_cb = TerminateOnNaN()
        rlr_cb = ReduceLROnPlateau(monitor='loss', verbose=1)
        els_cb = EarlyStopping(monitor='loss',
                               patience=15,
                               verbose=1,
                               restore_best_weights=True)
        cbs = [save_callback, csv_cb, nan_cb, rlr_cb, els_cb]
        mg_model = model

        # compile
        mg_model.compile(optimizer=Adam(lr=lr),
                         loss=model_losses,
                         loss_weights=loss_weights)

        mg_model.fit(
            [base, monitor],
            [monitor, np.zeros_like(base)],
            initial_epoch=initial_epoch,
            batch_size=8,
            epochs=nb_epochs,
            callbacks=cbs,
            #steps_per_epoch=steps_per_epoch,
            verbose=1)
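
A hypothetical call for this variant (data_dir, atlas_file, and steps_per_epoch are accepted but unused, since the volume paths are hardcoded and fit() runs on in-memory arrays):

train(data_dir=None,
      atlas_file=None,
      model_dir='models/segy',
      gpu_id='0',
      lr=1e-4,
      nb_epochs=100,
      prior_lambda=10,
      image_sigma=0.02,
      steps_per_epoch=None,
      batch_size=8,
      load_model_file=None,
      bidir=0,
      bool_cc=True)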
def test(
        gpu_id,
        model_dir,
        iter_num,
        compute_type='GPU',  # GPU or CPU
        vol_size=(160, 192, 224),
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 16, 3],
        save_file=None):
    """
    test via segmetnation propagation
    works by iterating over some iamge files, registering them to atlas,
    propagating the warps, then computing Dice with atlas segmentations
    """

    # GPU handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        # if testing miccai run, should be xy indexing.
        net = networks.miccai2018_net(vol_size,
                                      nf_enc,
                                      nf_dec,
                                      use_miccai_int=True,
                                      indexing='xy')
        net.load_weights(os.path.join(model_dir, str(iter_num) + '.h5'))

        # compose diffeomorphic flow output model
        diff_net = keras.models.Model(net.inputs,
                                      net.get_layer('diffflow').output)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size)

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # prepare a matrix of dice values
    dice_vals = np.zeros((len(good_labels), n_batches))
    for k in range(n_batches):
        # get data
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)

        # predict transform
        with tf.device(gpu):
            pred = diff_net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        if compute_type == 'CPU':
            flow = pred[0, :, :, :, :]
            warp_seg = util.warp_seg(X_seg,
                                     flow,
                                     grid=grid,
                                     xx=xx,
                                     yy=yy,
                                     zz=zz)

        else:  # GPU
            # rigid registration (GPU path only)
            flow = pred[0, :, :, :, :]
            # compute A (coordinate bookkeeping); sample_num and grid_dimension
            # are module-level constants here (10 and 4, judging by the array
            # shapes below)
            x = np.linspace(0, 160 - 16, sample_num)
            x = x.astype(np.int32)
            y = np.linspace(0, 190 - 19, sample_num)
            y = y.astype(np.int32)
            z = np.linspace(0, 220 - 22, sample_num)
            z = z.astype(np.int32)
            index = np.rollaxis(np.array(np.meshgrid(x, y, z)), 0, 4)
            x = index[:, :, :, 0]
            y = index[:, :, :, 1]
            z = index[:, :, :, 2]

            # Y in formula
            x_flow = np.arange(vol_size[0])
            y_flow = np.arange(vol_size[1])
            z_flow = np.arange(vol_size[2])
            grid = np.rollaxis(np.array((np.meshgrid(y_flow, x_flow, z_flow))),
                               0, 4)  # original coordinate
            grid_x = grid_sample(x, y, z, grid[:, :, :, 0], sample_num)
            grid_y = grid_sample(x, y, z, grid[:, :, :, 1], sample_num)
            grid_z = grid_sample(x, y, z, grid[:, :, :, 2],
                                 sample_num)  # X (10,10,10)

            sample = flow + grid
            sample_x = grid_sample(x, y, z, sample[:, :, :, 0], sample_num)
            sample_y = grid_sample(x, y, z, sample[:, :, :, 1], sample_num)
            sample_z = grid_sample(x, y, z, sample[:, :, :, 2],
                                   sample_num)  # Y (10,10,10)

            sum_x = np.sum(flow[:, :, :, 0])
            sum_y = np.sum(flow[:, :, :, 1])
            sum_z = np.sum(flow[:, :, :, 2])

            ave_x = sum_x / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_y = sum_y / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_z = sum_z / (vol_size[0] * vol_size[1] * vol_size[2])

            # formula
            Y = np.zeros((10, 10, 10, grid_dimension))
            X = np.zeros((10, 10, 10, grid_dimension))
            T = np.array([ave_x, ave_y, ave_z, 1])  # shape (4,)
            # R = np.zeros((10, 10, 10, grid_dimension, grid_dimension))

            for i in np.arange(10):
                for j in np.arange(10):
                    for z in np.arange(10):
                        Y[i, j, z, :] = np.array([
                            sample_x[i, j, z], sample_y[i, j, z],
                            sample_z[i, j, z], 1
                        ])

            for i in np.arange(10):
                for j in np.arange(10):
                    for z in np.arange(10):
                        X[i, j, z, :] = np.array([
                            grid_x[i, j, z], grid_y[i, j, z], grid_z[i, j, z],
                            1
                        ])

            X = X.reshape((1000, grid_dimension))
            Y = Y.reshape((1000, grid_dimension))
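            # R is the least-squares solution of min_R ||X R - Y||^2 via the
            # normal equations, R = (X^T X)^+ X^T Y; pinv guards against a
            # rank-deficient sample grid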
            R = np.dot(
                np.dot(np.linalg.pinv(np.dot(np.transpose(X), X)),
                       np.transpose(X)), Y)  # R

            # build new grid(Use R to do the spatial transform)
            shifted_x = np.arange(vol_size[0])
            shifted_y = np.arange(vol_size[1])
            shifted_z = np.arange(vol_size[2])
            print(shifted_x.shape)
            print(shifted_y.shape)
            print(shifted_z.shape)
            shifted_grid = np.rollaxis(
                np.array((np.meshgrid(shifted_y, shifted_x, shifted_z))), 0, 4)
            print(shifted_grid.shape)
            for i in np.arange(vol_size[0]):
                for j in np.arange(vol_size[1]):
                    for z in np.arange(vol_size[2]):
                        coordinates = np.dot(
                            R,
                            np.array([i, j, z, 1]).reshape(4, 1)) + T.reshape(
                                4, 1)
                        print("voxel." + '(' + str(i) + ',' + str(j) + ',' +
                              str(z) + ')')
                        shifted_grid[i, j, z, 0] = coordinates[0]
                        shifted_grid[i, j, z, 1] = coordinates[1]
                        shifted_grid[i, j, z, 2] = coordinates[2]

            # interpolation
            xx = np.arange(vol_size[1])
            yy = np.arange(vol_size[0])
            zz = np.arange(vol_size[2])
            warp_seg = interpn((yy, xx, zz),
                               X_seg[0, :, :, :, 0],
                               shifted_grid,
                               method='nearest',
                               bounds_error=False,
                               fill_value=0)

            # CVPR
            grid = np.rollaxis(np.array(np.meshgrid(xx, yy, zz)), 0, 4)
            sample = flow + grid
            sample = np.stack(
                (sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]),
                3)
            warp_seg2 = interpn((yy, xx, zz),
                                X_seg[0, :, :, :, 0],
                                sample,
                                method='nearest',
                                bounds_error=False,
                                fill_value=0)

            # compute dice
            vals, _ = dice(warp_seg, atlas_seg, labels=labels, nargout=2)
            vals2, _ = dice(X_seg[0, :, :, :, 0],
                            atlas_seg,
                            labels=labels,
                            nargout=2)
            vals3, _ = dice(warp_seg2, atlas_seg, labels=labels, nargout=2)
            print("dice before:")
            print(np.mean(vals2), np.std(vals2))
            print("dice after deformable registration:")
            print(np.mean(vals3), np.std(vals3))
            print("dice after rigid registration:")
            print(np.mean(vals), np.std(vals))
            warp_seg = nn_trf_model.predict([X_seg, pred])[0, ..., 0]

        # compute Volume Overlap (Dice)
        dice_vals[:, k] = dice(warp_seg, atlas_seg, labels=good_labels)
        print('%3d %5.3f %5.3f' % (k, np.mean(
            dice_vals[:, k]), np.mean(np.mean(dice_vals[:, :k + 1]))))

        if save_file is not None:
            sio.savemat(save_file, {
                'dice_vals': dice_vals,
                'labels': good_labels
            })
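
grid_sample is an undefined helper in the rigid-registration tests above and below; a minimal sketch consistent with how it is called (gathering a volume at integer sample coordinates; the axis convention is my assumption):

def grid_sample(x, y, z, vol, sample_num):
    # x, y, z are (sample_num, sample_num, sample_num) integer index arrays
    # into the three axes of vol; fancy indexing returns the sampled block
    assert x.shape == (sample_num,) * 3
    return vol[x, y, z]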
Example #7
def train(model_dir,
          gpu_id,
          lr,
          n_iterations,
          alpha,
          image_sigma,
          model_save_iter,
          batch_size=1):
    """
    model training function
    :param model_dir: model folder to save to
    :param gpu_id: integer specifying the gpu to use
    :param lr: learning rate
    :param n_iterations: number of training iterations
    :param alpha: the alpha, the scalar in front of the smoothing laplacian, in MICCAI paper
    :param image_sigma: the image sigma in MICCAI paper
    :param model_save_iter: frequency with which to save models
    :param batch_size: Optional, default of 1. can be larger, depends on GPU memory and volume size
    """

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
    print(model_dir)

    # gpu handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # Diffeomorphic network architecture used in MICCAI 2018 paper
    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]

    # prepare the model
    # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, velocity_stats]
    # in the experiments, we use image_2 as atlas
    with tf.device(gpu):
        # miccai 2018 used xy indexing.
        model = networks.miccai2018_net(vol_size,
                                        nf_enc,
                                        nf_dec,
                                        use_miccai_int=True,
                                        indexing='xy')

        # compile
        model_losses = [losses.kl_l2loss(image_sigma), losses.kl_loss(alpha)]
        model.compile(optimizer=Adam(lr=lr), loss=model_losses)

        # save first iteration
        model.save(os.path.join(model_dir, str(0) + '.h5'))

    train_example_gen = datagenerators.example_gen(train_vol_names)
    zeros = np.zeros((1, *vol_size, 3))

    # train. Note: we use train_on_batch and design our own print function, as this has
    # enabled faster development and debugging; one could also use fit_generator and Keras callbacks.
    for step in range(1, n_iterations):

        # get_data
        X = next(train_example_gen)[0]

        # train
        with tf.device(gpu):
            train_loss = model.train_on_batch([X, atlas_vol],
                                              [atlas_vol, zeros])

        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        # print
        print_loss(step, 0, train_loss)

        # save model
        with tf.device(gpu):
            if (step % model_save_iter == 0) or step < 10:
                model.save(os.path.join(model_dir, str(step) + '.h5'))
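
This script-style trainer assumes module-level vol_size, train_vol_names, and atlas_vol; a plausible setup (paths are placeholders):

import glob
import numpy as np

vol_size = (160, 192, 224)
train_vol_names = glob.glob('data/train/*.npz')
atlas_vol = np.load('data/atlas_norm.npz')['vol'][np.newaxis, ..., np.newaxis]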
Example #8
def train(data_dir,
          model,
          model_name,
          gpu_id,
          lr,
          nb_epochs,
          reg_param,
          steps_per_epoch,
          batch_size,
          load_model_file,
          atlas_file,
          max_clip,
          distance,
          patch_size,
          use_ssc,
          use_gaussian_kernel,
          use_fixed_var,
          use_miccai,
          initial_epoch=0):
    """
    model training function
    :param data_dir: folder with npz files for each subject.
    :param atlas_file: atlas filename; loaded with nibabel, so a nii file is expected
    :param model: vm1, vm2, or vm2double (based on CVPR 2018 paper)
    :param model_name: name of the model folder to save to (under ../models/)
    :param gpu_id: integer specifying the gpu to use
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param reg_param: the smoothness/reconstruction tradeoff parameter (lambda in CVPR paper)
    :param steps_per_epoch: number of gradient steps per epoch
    :param batch_size: optional (default 1); can be larger, depending on GPU memory and volume size
    :param load_model_file: optional h5 model file to initialize with
    :param max_clip: intensity ceiling the atlas is rescaled to
    :param distance, patch_size, use_ssc, use_gaussian_kernel, use_fixed_var: MIND loss parameters
    :param use_miccai: use the diffeomorphic MICCAI 2018 network instead of the CVPR one
    """

    # load atlas from provided files. The atlas we used is 160x192x224.
    atlas_vol = nib.load(atlas_file).get_data()[np.newaxis, ..., np.newaxis]
    atlas_vol = atlas_vol / np.max(atlas_vol) * max_clip
    # atlas_vol = nib.load('../data/t1_atlas.nii').get_data()[np.newaxis,...,np.newaxis]
    vol_size = atlas_vol.shape[1:-1]
    # prepare data files
    # for the CVPR and MICCAI papers, we have data arranged in train/validate/test folders
    # inside each folder is a /vols/ and a /asegs/ folder with the volumes
    # and segmentations. All of our papers use npz-formatted data.
    train_vol_names = glob.glob(os.path.join(data_dir, '*.npz'))
    random.shuffle(train_vol_names)  # shuffle volume list
    assert len(train_vol_names) > 0, "Could not find any training data"

    # UNET filters for voxelmorph-1 and voxelmorph-2,
    # these are architectures presented in CVPR 2018
    nf_enc = [16, 32, 32, 32]
    if model == 'vm1':
        nf_dec = [32, 32, 32, 32, 8, 8]
    elif model == 'vm2':
        nf_dec = [32, 32, 32, 32, 32, 16, 16]
    else:  # 'vm2double':
        nf_enc = [f * 2 for f in nf_enc]
        nf_dec = [f * 2 for f in [32, 32, 32, 32, 32, 16, 16]]

    model_dir = "../models/" + model_name
    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # GPU handling
    gpu = '/gpu:%d' % gpu_id
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    print(gpu)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        # prepare the model
        # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, flow]
        # in the experiments, we use image_2 as atlas
        if use_miccai:
            print('miccai: therefore diffeomorphic')
            model = networks.miccai2018_net(vol_size, nf_enc, nf_dec)
        else:
            model = networks.cvpr2018_net(vol_size, nf_enc, nf_dec)

        # load initial weights
        if load_model_file is not None and load_model_file != '':
            print('loading', load_model_file)
            model.load_weights(load_model_file)

        # save first iteration
        model.save(os.path.join(model_dir, '%02d.h5' % initial_epoch))

    # data generator
    # nb_gpus = len(gpu_id.split(','))
    # assert np.mod(batch_size, nb_gpus) == 0, \
    #     'batch_size should be a multiple of the nr. of gpus. ' + \
    #     'Got batch_size %d, %d gpus' % (batch_size, nb_gpus)
    nb_gpus = 1

    train_example_gen = datagenerators.example_gen(train_vol_names,
                                                   batch_size=batch_size)
    atlas_vol_bs = np.repeat(atlas_vol, batch_size, axis=0)

    # note: this pairs the miccai network with cvpr2018_gen and vice versa;
    # in the reference datagenerators both yield ([X, atlas], [atlas, zeros])
    # pairs, so the two-output compile below still matches either way
    if use_miccai:
        data_gen = datagenerators.cvpr2018_gen(train_example_gen,
                                               atlas_vol_bs,
                                               batch_size=batch_size)
    else:
        data_gen = datagenerators.miccai2018_gen(train_example_gen,
                                                 atlas_vol_bs,
                                                 batch_size=batch_size)

    # prepare callbacks
    save_file_name = os.path.join(model_dir, '{epoch:02d}.h5')

    loss_function = losses.mind(distance,
                                patch_size,
                                use_ssc=use_ssc,
                                use_gaussian_kernel=use_gaussian_kernel,
                                use_fixed_var=use_fixed_var)

    # fit generator
    with tf.device(gpu):

        # multi-gpu support
        if nb_gpus > 1:
            save_callback = nrn_gen.ModelCheckpointParallel(save_file_name)
            mg_model = multi_gpu_model(model, gpus=nb_gpus)

        # single-gpu
        else:
            save_callback = ModelCheckpoint(save_file_name, verbose=1)
            mg_model = model

        # compile
        mg_model.compile(optimizer=Adam(lr=lr),
                         loss=[loss_function,
                               losses.Grad('l2').loss],
                         loss_weights=[1.0, reg_param])

        # fit
        mg_model.fit_generator(data_gen,
                               initial_epoch=initial_epoch,
                               epochs=nb_epochs,
                               callbacks=[save_callback],
                               steps_per_epoch=steps_per_epoch,
                               verbose=1)
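
A hypothetical call (all paths and MIND-loss parameters are illustrative):

train(data_dir='data/train',
      model='vm2',
      model_name='mind_vm2',
      gpu_id=0,
      lr=1e-4,
      nb_epochs=10,
      reg_param=0.01,
      steps_per_epoch=100,
      batch_size=1,
      load_model_file=None,
      atlas_file='data/atlas.nii',
      max_clip=1.0,
      distance=2,
      patch_size=3,
      use_ssc=False,
      use_gaussian_kernel=False,
      use_fixed_var=True,
      use_miccai=False)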
Example #9
def test(model_name,
         epoch,
         gpu_id,
         n_test,
         invert_images,
         max_clip,
         indexing,
         use_miccai,
         atlas_file,
         atlas_seg_file,
         normalize_atlas,
         vol_size=(160, 192, 224),
         nf_enc=[16, 32, 32, 32],
         nf_dec=[32, 32, 32, 32, 32, 16, 16]):
    start_time = time.time()
    good_labels = sio.loadmat('../data/labels.mat')['labels'][0]

    # setup
    gpu = '/gpu:' + str(gpu_id)
    #     print(gpu)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    restrict_GPU_tf(str(gpu_id))
    restrict_GPU_keras(str(gpu_id))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    atlas_vol = nib.load(atlas_file).get_data()[np.newaxis, ..., np.newaxis]
    atlas_seg = nib.load(atlas_seg_file).get_data()

    if normalize_atlas:
        atlas_vol = atlas_vol / np.max(atlas_vol) * max_clip

    sz = atlas_seg.shape
    z_inp1 = tf.placeholder(tf.float32, sz)
    z_inp2 = tf.placeholder(tf.float32, sz)
    z_out = losses.kdice(z_inp1, z_inp2, good_labels)
    kdice_fn = K.function([z_inp1, z_inp2], [z_out])

    # load weights of model
    with tf.device(gpu):
        if use_miccai:
            net = networks.miccai2018_net(vol_size, nf_enc, nf_dec)
            net.load_weights('../models/' + model_name + '/' + str(epoch) +
                             '.h5')
            trf_model = networks.trf_core(
                (vol_size[0] // 2, vol_size[1] // 2, vol_size[2] // 2),
                nb_feats=len(good_labels) + 1,
                indexing=indexing)

        else:
            net = networks.cvpr2018_net(vol_size, nf_enc, nf_dec)
            net.load_weights('../models/' + model_name + '/' + str(epoch) +
                             '.h5')
            trf_model = networks.trf_core(vol_size,
                                          nb_feats=len(good_labels) + 1,
                                          indexing=indexing)

    dice_means = []
    dice_stds = []

    for step in range(0, n_test):
        # get data
        if n_test == 1:
            X_vol = nib.load('../t1_atlas.nii').get_data()[np.newaxis, ...,
                                                           np.newaxis]
            X_seg = nib.load('../t1_atlas_seg.nii').get_data()[np.newaxis, ...,
                                                               np.newaxis]
        else:
            vol_name, seg_name = test_brain_strings[step].split(",")
            X_vol, X_seg = datagenerators.load_example_by_name(
                vol_name, seg_name)

        if invert_images:
            X_vol = max_clip - X_vol

        with tf.device(gpu):
            pred = net.predict([X_vol, atlas_vol])
            all_labels = np.unique(X_seg)
            for l in all_labels:
                if l not in good_labels:
                    X_seg[X_seg == l] = 0
            for i in range(len(good_labels)):
                X_seg[X_seg == good_labels[i]] = i + 1
            seg_onehot = tf.keras.utils.to_categorical(
                X_seg[0, :, :, :, 0], num_classes=len(good_labels) + 1)
            warp_seg_onehot = trf_model.predict(
                [seg_onehot[tf.newaxis, :, :, :, :], pred[1]])
            warp_seg = np.argmax(warp_seg_onehot[0, :, :, :, :], axis=3)

            warp_seg_correct = np.zeros(warp_seg.shape)
            for i in range(len(good_labels)):
                warp_seg_correct[warp_seg == i + 1] = good_labels[i]

            dice = kdice_fn([warp_seg_correct, atlas_seg])

            mean = np.mean(dice)
            std = np.std(dice)
            dice_means.append(mean)
            dice_stds.append(std)
            print(step, mean, std)

    print('average dice:', np.mean(dice_means))
    print('std over patients:', np.std(dice_means))
    print('average std over regions:', np.mean(dice_stds))
    print('time taken:', time.time() - start_time)
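
A hypothetical call (file names are placeholders; n_test=1 exercises the built-in atlas-to-atlas path, while larger n_test requires the module-level test_brain_strings list):

test(model_name='mind_vm2',
     epoch=10,
     gpu_id=0,
     n_test=1,
     invert_images=False,
     max_clip=1.0,
     indexing='ij',
     use_miccai=False,
     atlas_file='data/atlas.nii',
     atlas_seg_file='data/atlas_seg.nii',
     normalize_atlas=True)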
def test(
        gpu_id,
        iter_num,
        compute_type='GPU',  # GPU or CPU
        vol_size=(160, 192, 224),
        nf_enc=[16, 32, 32, 32],
        nf_dec=[32, 32, 32, 32, 16, 3],
        save_file=None):
    """
    test by segmentation, compute dice between atlas_seg and warp_seg
    :param gpu_id: gpu id
    :param iter_num: specify the model to read
    :param compute_type: CPU/GPU
    :param vol_size: volume size
    :param nf_enc: encoder filter counts
    :param nf_dec: decoder filter counts
    :param save_file: optional .mat path to save the dice values to
    :return: None
    """

    # GPU handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # load weights of model
    with tf.device(gpu):
        # if testing miccai run, should be xy indexing.
        net = networks.miccai2018_net(vol_size,
                                      nf_enc,
                                      nf_dec,
                                      use_miccai_int=True,
                                      indexing='xy')
        model_dir = "/home/ys895/rigid_diff_model/"
        net.load_weights(os.path.join(model_dir, str(iter_num) + '.h5'))

        # compose diffeomorphic flow output model
        diff_net = keras.models.Model(net.inputs,
                                      net.get_layer('diffflow').output)

        # NN transfer model
        nn_trf_model = networks.nn_trf(vol_size)

    # if CPU, prepare grid
    if compute_type == 'CPU':
        grid, xx, yy, zz = util.volshape2grid_3d(vol_size, nargout=4)

    # prepare a matrix of dice values
    dice_vals = np.zeros((len(good_labels), n_batches))
    for k in range(n_batches):
        # get data
        vol_name, seg_name = test_brain_strings[k].split(",")
        X_vol, X_seg = datagenerators.load_example_by_name(vol_name, seg_name)
        orig_vol = X_vol
        orig_seg = X_seg

        theta = 0
        beta = 5
        omega = 0
        X_seg = rotate_img(X_seg[0, :, :, :, 0],
                           theta=theta,
                           beta=beta,
                           omega=omega)
        X_vol = rotate_img(X_vol[0, :, :, :, 0],
                           theta=theta,
                           beta=beta,
                           omega=omega)
        X_seg = X_seg.reshape((1, ) + X_seg.shape + (1, ))
        X_vol = X_vol.reshape((1, ) + X_vol.shape + (1, ))

        sample_num = 30
        grid_dimension = 4

        # predict transform
        with tf.device(gpu):
            pred = diff_net.predict([X_vol, atlas_vol])

        # Warp segments with flow
        if compute_type == 'CPU':
            flow = pred[0, :, :, :, :]
            warp_seg = util.warp_seg(X_seg,
                                     flow,
                                     grid=grid,
                                     xx=xx,
                                     yy=yy,
                                     zz=zz)
        else:  # GPU

            flow = pred[0, :, :, :, :]

            # sample coordinate(sample_num * sample_num * sample_num)
            x = np.linspace(0, (vol_size[0] / sample_num) * (sample_num - 1),
                            sample_num)
            x = x.astype(np.int32)
            y = np.linspace(0, (vol_size[1] / sample_num) * (sample_num - 1),
                            sample_num)
            y = y.astype(np.int32)
            z = np.linspace(0, (vol_size[2] / sample_num) * (sample_num - 1),
                            sample_num)
            z = z.astype(np.int32)
            index = np.rollaxis(np.array(np.meshgrid(y, x, z)), 0, 4)
            x = index[:, :, :, 1]
            y = index[:, :, :, 0]
            z = index[:, :, :, 2]

            # Y in formula
            x_flow = np.arange(vol_size[0])
            y_flow = np.arange(vol_size[1])
            z_flow = np.arange(vol_size[2])
            grid = np.rollaxis(np.array((np.meshgrid(y_flow, x_flow, z_flow))),
                               0, 4)  # original coordinate
            grid_x = grid_sample(x, y, z, grid[:, :, :, 1], sample_num)
            grid_y = grid_sample(x, y, z, grid[:, :, :, 0], sample_num)
            grid_z = grid_sample(x, y, z, grid[:, :, :, 2],
                                 sample_num)  # X: (sample_num,)*3

            sample = flow + grid
            sample_x = grid_sample(x, y, z, sample[:, :, :, 1], sample_num)
            sample_y = grid_sample(x, y, z, sample[:, :, :, 0], sample_num)
            sample_z = grid_sample(x, y, z, sample[:, :, :, 2],
                                   sample_num)  # Y: (sample_num,)*3

            sum_x = np.sum(flow[:, :, :, 1])
            sum_y = np.sum(flow[:, :, :, 0])
            sum_z = np.sum(flow[:, :, :, 2])

            ave_x = sum_x / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_y = sum_y / (vol_size[0] * vol_size[1] * vol_size[2])
            ave_z = sum_z / (vol_size[0] * vol_size[1] * vol_size[2])

            # formula
            Y = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
            X = np.zeros((sample_num, sample_num, sample_num, grid_dimension))
            T = np.array([ave_x, ave_y, ave_z, 1])  # shape (4,)
            print(T)

            for i in np.arange(sample_num):
                for j in np.arange(sample_num):
                    for z in np.arange(sample_num):
                        Y[i, j, z, :] = np.array([
                            sample_x[i, j, z], sample_y[i, j, z],
                            sample_z[i, j, z], 1
                        ])
                        #Y[i, j, z, :] = Y[i, j, z, :] - np.array([ave_x, ave_y, ave_z, 0])  # amend: Y` = Y - T

            for i in np.arange(sample_num):
                for j in np.arange(sample_num):
                    for z in np.arange(sample_num):
                        X[i, j, z, :] = np.array([
                            grid_x[i, j, z], grid_y[i, j, z], grid_z[i, j, z],
                            1
                        ])

            X = X.reshape(
                (sample_num * sample_num * sample_num, grid_dimension))
            Y = Y.reshape(
                (sample_num * sample_num * sample_num, grid_dimension))
            R = np.dot(
                np.dot(np.linalg.pinv(np.dot(np.transpose(X), X)),
                       np.transpose(X)), Y)  # R(4, 4)
            print(R)
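            # note: the least-squares estimate R is printed and then discarded;
            # the known ground-truth rotation (beta degrees, applied to the
            # inputs above) is substituted to validate the rest of the pipeline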
            beta = -(beta / 180) * math.pi
            R = np.array([[math.cos(beta), 0, -math.sin(beta), 0],
                          [0, 1, 0, 0], [math.sin(beta), 0,
                                         math.cos(beta), 0], [0, 0, 0, 1]])
            #R = R.transpose()

            # build new grid(Use R to do the spatial transform)
            shifted_x = np.arange(vol_size[0])
            shifted_y = np.arange(vol_size[1])
            shifted_z = np.arange(vol_size[2])
            shifted_grid = np.rollaxis(
                np.array((np.meshgrid(shifted_y, shifted_x, shifted_z))), 0, 4)

            # some required matrixs
            T1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                           [
                               -int(vol_size[0] / 2), -int(vol_size[1] / 2),
                               -int(vol_size[2] / 2), 1
                           ]])

            T2 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                           [
                               int(vol_size[0] / 2),
                               int(vol_size[1] / 2),
                               int(vol_size[2] / 2), 1
                           ]])

            for i in np.arange(vol_size[0]):
                for j in np.arange(vol_size[1]):
                    for z in np.arange(vol_size[2]):
                        #coordinates = np.dot(R, np.array([i, j, z, 1]).reshape(4, 1)) + T.reshape(4, 1)
                        coordinates = np.dot(
                            np.dot(
                                np.dot(
                                    np.array([i, j, z, 1]).reshape(1, 4), T1),
                                R), T2)  # new implementation
                        # print("voxel." + '(' + str(i) + ',' + str(j) + ',' + str(z) + ')')
                        shifted_grid[i, j, z, 1] = coordinates[0, 0]
                        shifted_grid[i, j, z, 0] = coordinates[0, 1]
                        shifted_grid[i, j, z, 2] = coordinates[0, 2]

            # interpolation
            xx = np.arange(vol_size[1])
            yy = np.arange(vol_size[0])
            zz = np.arange(vol_size[2])
            shifted_grid = np.stack(
                (shifted_grid[:, :, :, 1], shifted_grid[:, :, :, 0],
                 shifted_grid[:, :, :, 2]), 3
            )  # shifted_grid was filled with x and y swapped; this stack swaps them back
            warp_seg = interpn((yy, xx, zz),
                               X_seg[0, :, :, :, 0],
                               shifted_grid,
                               method='nearest',
                               bounds_error=False,
                               fill_value=0)  # rigid registration
            warp_vol = interpn((yy, xx, zz),
                               X_vol[0, :, :, :, 0],
                               shifted_grid,
                               method='nearest',
                               bounds_error=False,
                               fill_value=0)  # rigid registration

        # compute Volume Overlap (Dice)
        dice_vals[:, k] = dice(warp_seg,
                               orig_seg[0, :, :, :, 0],
                               labels=good_labels)
        print('%3d %5.3f %5.3f' % (k, np.mean(
            dice_vals[:, k]), np.mean(np.mean(dice_vals[:, :k + 1]))))

        if save_file is not None:
            sio.savemat(save_file, {
                'dice_vals': dice_vals,
                'labels': good_labels
            })

        # specify slice
        num_slice = 90

        plt.figure()
        plt.subplot(1, 3, 1)
        plt.imshow(orig_vol[0, :, num_slice, :, 0])
        plt.subplot(1, 3, 2)
        plt.imshow(X_vol[0, :, num_slice, :, 0])
        plt.subplot(1, 3, 3)
        plt.imshow(warp_vol[:, num_slice, :])
        plt.savefig("slice" + str(num_slice) + '_' + str(k) + ".png")

        plt.figure()
        plt.subplot(1, 3, 1)
        plt.imshow(flow[:, num_slice, :, 1])
        plt.subplot(1, 3, 2)
        plt.imshow(flow[:, num_slice, :, 0])
        plt.subplot(1, 3, 3)
        plt.imshow(flow[:, num_slice, :, 2])
        plt.savefig("flow.png")