Example #1
def register(gpu_id, moving, fixed, model_file, out_img, out_warp):
    """
    Register the moving image to the fixed image.
    """
    assert model_file, "A model file is necessary"
    assert out_img or out_warp, "output image or warp file needs to be specified"

    # GPU handling
    if gpu_id is not None:
        gpu = '/gpu:' + str(gpu_id)
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        set_session(tf.Session(config=config))
    else:
        gpu = '/cpu:0'

    # load data
    mov_nii = nib.load(moving)
    mov = mov_nii.get_data()[np.newaxis, ..., np.newaxis]
    fix_nii = nib.load(fixed)
    fix = fix_nii.get_data()[np.newaxis, ..., np.newaxis]

    with tf.device(gpu):
        # load model
        custom_objects = {
            'SpatialTransformer': nrn_layers.SpatialTransformer,
            'VecInt': nrn_layers.VecInt,
            'Sample': networks.Sample,
            'Rescale': networks.RescaleDouble,
            'Resize': networks.ResizeDouble,
            'Negate': networks.Negate,
            'recon_loss': losses.Miccai2018(0.02, 10).recon_loss,  # exact values shouldn't matter here
            'kl_loss': losses.Miccai2018(0.02, 10).kl_loss  # exact values shouldn't matter here
        }

        net = keras.models.load_model(model_file,
                                      custom_objects=custom_objects)

        # register
        [moved, warp] = net.predict([mov, fix])

    # output image
    if out_img is not None:
        img = nib.Nifti1Image(moved[0, ..., 0], mov_nii.affine)
        nib.save(img, out_img)

    # output warp
    if out_warp is not None:
        img = nib.Nifti1Image(warp[0, ...], mov_nii.affine)
        nib.save(img, out_warp)
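
A hypothetical invocation of the register function above; all paths are placeholders, and gpu_id=None falls back to the CPU branch handled inside the function:

register(gpu_id=0,
         moving='data/subject_mov.nii.gz',
         fixed='data/atlas.nii.gz',
         model_file='models/miccai2018.h5',
         out_img='out/moved.nii.gz',
         out_warp='out/warp.nii.gz')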
Example #2
def train(data_dir,
          atlas_file,
          model_dir,
          gpu_id,
          lr,
          nb_epochs,
          prior_lambda,
          image_sigma,
          steps_per_epoch,
          batch_size,
          load_model_file,
          bidir,
          initial_epoch=0):
    """
    model training function
    :param data_dir: folder with npz files for each subject.
    :param atlas_file: atlas filename; currently an npz file with a 'vol' variable is expected
    :param model_dir: model folder to save to
    :param gpu_id: gpu id(s) to use, e.g. '0' or '0,1' (comma-separated string)
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param prior_lambda: scalar in front of the smoothing Laplacian, from the MICCAI 2018 paper
    :param image_sigma: the image sigma from the MICCAI 2018 paper
    :param steps_per_epoch: gradient steps per epoch (a model checkpoint is saved after each epoch)
    :param batch_size: optional, defaults to 1; can be larger depending on GPU memory and volume size
    :param load_model_file: optional h5 model file to initialize with
    :param bidir: whether to use the bidirectional cost function
    """

    # load atlas from provided files. The atlas we used is 160x192x224.
    atlas_vol = np.load(atlas_file)['vol'][np.newaxis, ..., np.newaxis]
    vol_size = atlas_vol.shape[1:-1]
    # prepare data files
    # for the CVPR and MICCAI papers, we have data arranged in train/validate/test folders
    # inside each folder is a /vols/ and a /asegs/ folder with the volumes
    # and segmentations. All of our papers use npz-formatted data.
    train_vol_names = glob.glob(os.path.join(data_dir, '*.npz'))
    random.shuffle(train_vol_names)  # shuffle volume list
    assert len(train_vol_names) > 0, "Could not find any training data"

    # Diffeomorphic network architecture used in MICCAI 2018 paper
    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # gpu handling
    gpu = '/gpu:0'  # device 0 of the GPUs made visible via CUDA_VISIBLE_DEVICES below
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        # the MICCAI 2018 model takes in [image_1, image_2] and outputs [warped_image_1, velocity_stats]
        # in these experiments, we use image_2 as atlas
        model = networks.miccai2018_net(vol_size, nf_enc, nf_dec, bidir=bidir)

        # load initial weights
        if load_model_file is not None and load_model_file != "":
            model.load_weights(load_model_file)

        # save first iteration
        model.save(os.path.join(model_dir, '%02d.h5' % initial_epoch))

        # compile
        # note: it is better to supply the vol_shape here than to let TF infer it.
        flow_vol_shape = model.outputs[-1].shape[1:-1]
        loss_class = losses.Miccai2018(image_sigma,
                                       prior_lambda,
                                       flow_vol_shape=flow_vol_shape)
        if bidir:
            model_losses = [
                loss_class.recon_loss, loss_class.recon_loss,
                loss_class.kl_loss
            ]
            loss_weights = [0.5, 0.5, 1]
        else:
            model_losses = [loss_class.recon_loss, loss_class.kl_loss]
            loss_weights = [1, 1]

    # data generator
    nb_gpus = len(gpu_id.split(','))
    assert np.mod(batch_size, nb_gpus) == 0, \
        'batch_size should be a multiple of the nr. of gpus. ' + \
        'Got batch_size %d, %d gpus' % (batch_size, nb_gpus)

    train_example_gen = datagenerators.example_gen(train_vol_names,
                                                   batch_size=batch_size)
    atlas_vol_bs = np.repeat(atlas_vol, batch_size, axis=0)
    miccai2018_gen = datagenerators.miccai2018_gen(train_example_gen,
                                                   atlas_vol_bs,
                                                   batch_size=batch_size,
                                                   bidir=bidir)

    # prepare callbacks
    save_file_name = os.path.join(model_dir, '{epoch:02d}.h5')

    # fit generator
    with tf.device(gpu):

        # multi-gpu support
        if nb_gpus > 1:
            save_callback = nrn_gen.ModelCheckpointParallel(save_file_name)
            mg_model = multi_gpu_model(model, gpus=nb_gpus)

        # single gpu
        else:
            save_callback = ModelCheckpoint(save_file_name)
            mg_model = model

        mg_model.compile(optimizer=Adam(lr=lr),
                         loss=model_losses,
                         loss_weights=loss_weights)
        mg_model.fit_generator(miccai2018_gen,
                               initial_epoch=initial_epoch,
                               epochs=nb_epochs,
                               callbacks=[save_callback],
                               steps_per_epoch=steps_per_epoch,
                               verbose=1)
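
A hypothetical invocation of the training function above. The image_sigma and prior_lambda values mirror the ones used elsewhere in these examples (0.02 and 10); the paths, learning rate, and step counts are placeholders:

train(data_dir='data/train',
      atlas_file='data/atlas_norm.npz',
      model_dir='models/miccai2018',
      gpu_id='0',          # comma-separated string; '0,1' would enable multi-gpu
      lr=1e-4,
      nb_epochs=1500,
      prior_lambda=10,
      image_sigma=0.02,
      steps_per_epoch=100,
      batch_size=1,
      load_model_file=None,
      bidir=False)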
Example #3
def train(data_dir,
          atlas_file,
          model_dir,
          model,
          gpu_id,
          lr,
          nb_epochs,
          prior_lambda,
          image_sigma,
          mean_lambda,
          steps_per_epoch,
          batch_size,
          load_model_file,
          bidir,
          atlas_wt,
          bias_mult,
          smooth_pen_layer,
          data_loss,
          reg_param,
          ncc_win,
          initial_epoch=0):
    """
    model training function
    :param data_dir: glob pattern matching the training npz volumes
    :param atlas_file: atlas filename; currently an npz file with a 'vol' variable is expected.
        If None or empty, an "atlas" is built by averaging training subjects (see below).
    :param model_dir: model folder to save to
    :param model: architecture selector, 'm1' or 'm1double' (doubled filter counts)
    :param gpu_id: gpu id to use
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param prior_lambda: scalar in front of the smoothing Laplacian, from the MICCAI 2018 paper
    :param image_sigma: the image sigma from the MICCAI 2018 paper
    :param mean_lambda: weight of the mean_layer_loss penalty (mean squared value of the corresponding output)
    :param steps_per_epoch: gradient steps per epoch (a model checkpoint is saved after each epoch)
    :param batch_size: optional, defaults to 1; can be larger depending on GPU memory and volume size
    :param load_model_file: optional h5 model file to initialize with (loaded by name)
    :param bidir: whether to use the bidirectional cost function
    :param atlas_wt: weight of the subject-side reconstruction loss (the atlas side gets 1 - atlas_wt)
    :param bias_mult: currently unused in this function
    :param smooth_pen_layer: layer whose output receives the smoothness penalty (passed to the network)
    :param data_loss: reconstruction loss, one of 'mse', 'cc' or 'ncc'
    :param reg_param: weight of the Grad('l2') smoothness loss (bidirectional case)
    :param ncc_win: window size for the NCC loss
    """
    # prepare data files
    # we have data arranged in train/validate/test folders
    # inside each folder is a /vols/ and a /asegs/ folder with the volumes
    # and segmentations. All of our papers use npz-formatted data.
    train_vol_names = glob.glob(data_dir)
    train_vol_names = [f for f in train_vol_names if 'ADNI' not in f]
    random.shuffle(train_vol_names)  # shuffle volume list
    assert len(train_vol_names) > 0, "Could not find any training data"

    # data generator
    train_example_gen = datagenerators.example_gen(train_vol_names, batch_size=batch_size)

    # prepare the initial weights for the atlas "layer"
    if atlas_file is None or atlas_file == "":
        nb_atl_creation = 100
        print('creating "atlas" by averaging %d subjects' % nb_atl_creation)
        x_avg = 0
        for _ in range(nb_atl_creation):
            x_avg += next(train_example_gen)[0][0,...,0]
        x_avg /= nb_atl_creation

        x_avg = x_avg[np.newaxis,...,np.newaxis]
        atlas_vol = x_avg
    else:
        atlas_vol = np.load(atlas_file)['vol'][np.newaxis, ..., np.newaxis]
    vol_size = atlas_vol.shape[1:-1]

    # Diffeomorphic network architecture used in MICCAI 2018 paper
    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]
    if model == 'm1':
        pass
    elif model == 'm1double':
        nf_enc = [f*2 for f in nf_enc]
        nf_dec = [f*2 for f in nf_dec]

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)


    assert data_loss in ['mse', 'cc', 'ncc'], 'Loss should be one of mse, cc or ncc; found %s' % data_loss
    if data_loss in ['ncc', 'cc']:
        data_loss = losses.NCC(win=[ncc_win]*3).loss      
    else:
        data_loss = lambda y_t, y_p: K.mean(K.square(y_t-y_p))

    # gpu handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        # the MICCAI 2018 model takes in [image_1, image_2] and outputs [warped_image_1, velocity_stats]
        # in these experiments, we use image_2 as atlas
        model = networks.img_atlas_diff_model(vol_size, nf_enc, nf_dec, 
                                            atl_mult=1, bidir=bidir,
                                            smooth_pen_layer=smooth_pen_layer)



        # compile
        mean_layer_loss = lambda _, y_pred: mean_lambda * K.mean(K.square(y_pred))

        flow_vol_shape = model.outputs[-2].shape[1:-1]
        loss_class = losses.Miccai2018(image_sigma, prior_lambda, flow_vol_shape=flow_vol_shape)
        if bidir:
            model_losses = [data_loss,
                            lambda _,y_p: data_loss(model.get_layer('atlas').output, y_p),
                            mean_layer_loss,
                            losses.Grad('l2').loss]
            loss_weights = [atlas_wt, 1-atlas_wt, 1, reg_param]
        else:
            model_losses = [loss_class.recon_loss, loss_class.kl_loss, mean_layer_loss]
            loss_weights = [1, 1, 1]
        model.compile(optimizer=Adam(lr=lr), loss=model_losses, loss_weights=loss_weights)
    
        # set initial weights in model
        model.get_layer('atlas').set_weights([atlas_vol[0,...]])

        # load initial weights; note this overwrites the img_param weights
        if load_model_file is not None and len(load_model_file) > 0:
            model.load_weights(load_model_file, by_name=True)



    # save first iteration
    model.save(os.path.join(model_dir, '%02d.h5' % initial_epoch))

    # atlas_generator specific to this model. Once we're convinced of this, move to datagenerators
    def atl_gen(gen):
        zero_flow = np.zeros([batch_size, *vol_size, len(vol_size)])
        while True:
            x2 = next(gen)[0]  # was: next(train_example_gen); use the passed-in generator
            # TODO: note this is the opposite of train_miccai and it might be confusing.
            yield ([atlas_vol, x2], [x2, atlas_vol, zero_flow, zero_flow])

    atlas_gen = atl_gen(train_example_gen)

    # prepare callbacks
    save_file_name = os.path.join(model_dir, '{epoch:02d}.h5')
    save_callback = ModelCheckpoint(save_file_name)

    # fit generator
    with tf.device(gpu):
        model.fit_generator(atlas_gen, 
                            initial_epoch=initial_epoch,
                            epochs=nb_epochs,
                            callbacks=[save_callback],
                            steps_per_epoch=steps_per_epoch,
                            verbose=1)
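
The atlas-initialization branch above builds an "atlas" by averaging subjects drawn from the data generator. A standalone sketch of the same idea (make_average_atlas is a hypothetical helper; gen is assumed to yield batches shaped [1, *vol_size, 1]):

import numpy as np

def make_average_atlas(gen, nb_subjects=100):
    # accumulate and average nb_subjects volumes drawn from the generator
    x_avg = 0.0
    for _ in range(nb_subjects):
        x_avg += next(gen)[0][0, ..., 0]
    x_avg /= nb_subjects
    # restore the [batch, *vol_size, channel] layout expected by the model
    return x_avg[np.newaxis, ..., np.newaxis]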
Example #4
def train(data_dir,
          atlas_file,
          model_dir,
          gpu_id,
          lr,
          nb_epochs,
          prior_lambda,
          image_sigma,
          steps_per_epoch,
          batch_size,
          load_model_file,
          bidir,
          bool_cc,
          initial_epoch=0):
    """
    model training function
    :param data_dir: folder with npz files for each subject (currently unused; data paths are hard-coded below)
    :param atlas_file: atlas filename (currently unused; see the commented-out load below)
    :param model_dir: model folder to save to
    :param gpu_id: gpu id to use, as a string
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param prior_lambda: scalar in front of the smoothing Laplacian, from the MICCAI 2018 paper
    :param image_sigma: the image sigma from the MICCAI 2018 paper
    :param steps_per_epoch: currently unused (see the commented-out argument to fit below)
    :param batch_size: note that fit() below hard-codes batch_size=8
    :param load_model_file: optional h5 model file to initialize with
    :param bidir: whether to use the bidirectional cost function
    :param bool_cc: if True, train the CC (CVPR) variant; otherwise the MICCAI variant
    """

    # load atlas from provided files. The atlas we used is 160x192x224.
    #atlas_vol = np.load(atlas_file)['vol'][np.newaxis, ..., np.newaxis]
    vm_dir = '/home/jdram/voxelmorph/'
    base = np.load(
        os.path.join(vm_dir, "data",
                     "ts12_dan_a88_fin_o_trim_adpc_002661_256.npy"))
    monitor = np.load(
        os.path.join(vm_dir, "data",
                     "ts12_dan_a05_fin_o_trim_adpc_002682_256.npy"))
    #base    = np.load(os.path.join(vm_dir, "data","ts12_dan_a88_fin_o_trim_adpc_002661_abs.npy"))
    #monitor = np.load(os.path.join(vm_dir, "data","ts12_dan_a05_fin_o_trim_adpc_002682_abs.npy"))

    #vol_size = (64, 64, 64)
    vol_size = (64, 64, 256 - 64)
    #vol_size = (128, 128, 256)

    # prepare data files
    # for the CVPR and MICCAI papers, we have data arranged in train/validate/test folders
    # inside each folder is a /vols/ and a /asegs/ folder with the volumes
    # and segmentations. All of our papers use npz-formatted data.
    #train_vol_names = glob.glob(os.path.join(data_dir, '*.npy'))
    #random.shuffle(train_vol_names)  # shuffle volume list
    #assert len(train_vol_names) > 0, "Could not find any training data"

    # Diffeomorphic network architecture used in MICCAI 2018 paper
    nf_enc = [32, 64, 64, 64]
    nf_dec = [64, 64, 64, 64, 32, 3]

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
    tf.reset_default_graph()

    if bool_cc:
        pre_net = "cc_"
    else:
        if bidir:
            pre_net = "miccai_bidir_"
        else:
            pre_net = "miccai_"

    # gpu handling
    gpu = '/device:GPU:%d' % int(gpu_id)  # gpu_id
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        # prepare the model
        # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, flow]
        # in the experiments, we use image_2 as atlas
        if bool_cc:
            model = networks.cvpr2018_net(vol_size, nf_enc, nf_dec)
        else:
            model = networks.miccai2018_net(vol_size,
                                            nf_enc,
                                            nf_dec,
                                            bidir=bidir,
                                            vel_resize=.5)

        # load initial weights
        if load_model_file is not None and load_model_file != "":
            print('loading', load_model_file)
            model.load_weights(load_model_file)

        # save first iteration
        model.save(os.path.join(model_dir, f'{pre_net}{initial_epoch:02d}.h5'))
        model.summary()

        if bool_cc:
            model_losses = [losses.NCC().loss, losses.Grad('l2').loss]
            loss_weights = [1.0, 0.01]  # recommend 1.0 for ncc, 0.01 for mse
        else:
            flow_vol_shape = model.outputs[-1].shape[1:-1]
            loss_class = losses.Miccai2018(image_sigma,
                                           prior_lambda,
                                           flow_vol_shape=flow_vol_shape)
            if bidir:
                model_losses = [
                    loss_class.recon_loss, loss_class.recon_loss,
                    loss_class.kl_loss
                ]
                loss_weights = [0.5, 0.5, 1]
            else:
                model_losses = [loss_class.recon_loss, loss_class.kl_loss]
                loss_weights = [1, 1]

    segy_gen = datagenerators.segy_gen(base, monitor, batch_size=batch_size)  # note: unused below; fit() trains on the full arrays

    # prepare callbacks
    save_file_name = os.path.join(model_dir, pre_net + '{epoch:02d}.h5')

    with tf.device(gpu):
        # fit generator
        save_callback = ModelCheckpoint(save_file_name, period=5)
        csv_cb = CSVLogger(f'{pre_net}log.csv')
        nan_cb = TerminateOnNaN()
        rlr_cb = ReduceLROnPlateau(monitor='loss', verbose=1)
        els_cb = EarlyStopping(monitor='loss',
                               patience=15,
                               verbose=1,
                               restore_best_weights=True)
        cbs = [save_callback, csv_cb, nan_cb, rlr_cb, els_cb]
        mg_model = model

        # compile
        mg_model.compile(optimizer=Adam(lr=lr),
                         loss=model_losses,
                         loss_weights=loss_weights)

        mg_model.fit(
            [base, monitor],
            [monitor, np.zeros_like(base)],
            initial_epoch=initial_epoch,
            batch_size=8,
            epochs=nb_epochs,
            callbacks=cbs,
            #steps_per_epoch=steps_per_epoch,
            verbose=1)
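
The fit call above feeds np.zeros_like(base) as the "target" for the flow output: the Grad('l2') term is a regularizer on the predicted flow, so the target array is just a placeholder. A minimal sketch of that dummy-target pattern (shapes assume the (64, 64, 192) vol_size used above, with one flow channel per spatial dimension):

import numpy as np

batch = 8
vol_size = (64, 64, 192)
zero_flow = np.zeros((batch, *vol_size, len(vol_size)))  # dummy target for the smoothness loss
# mg_model.fit([base, monitor], [monitor, zero_flow], ...)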
Example #5
def train(data_dir,
          model_dir,
          gpu_id,
          lr,
          nb_epochs,
          prior_lambda,
          image_sigma,
          steps_per_epoch,
          batch_size,
          load_model_file,
          bidir,
          initial_epoch=0):
    """
    model training function
    :param data_dir: folder with the .hdr image files
    :param model_dir: model folder to save to
    :param gpu_id: gpu id(s) to use, e.g. '0' or '0,1' (comma-separated string)
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param prior_lambda: scalar in front of the smoothing Laplacian, from the MICCAI 2018 paper
    :param image_sigma: the image sigma from the MICCAI 2018 paper
    :param steps_per_epoch: gradient steps per epoch (a model checkpoint is saved after each epoch)
    :param batch_size: optional, defaults to 1; can be larger depending on GPU memory and volume size
    :param load_model_file: optional h5 model file to initialize with
    :param bidir: whether to use the bidirectional cost function
    """

    vol_size = (160, 192, 224)

    # The first 30 images are used as the training set; the remaining 10 are the test set.
    constrain = list(range(31, 41))  # ids of the held-out test subjects
    moving_path = []
    list_dirs = os.walk(data_dir)
    for root, dirs, files in list_dirs:
        for f in files:
            if f.endswith(".hdr") and f.startswith("l"):
                for c in constrain:
                    if "l{}_".format(str(c)) in str(f) or "_l{}.".format(
                            str(c)) in str(f):
                        break
                else:
                    moving_path.append(os.path.join(data_dir, f))

    train_vol_names = moving_path
    random.shuffle(train_vol_names)  # shuffle volume list
    assert len(train_vol_names) > 0, "Could not find any training data"

    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # gpu handling
    gpu = '/gpu:0'  # device 0 of the GPUs made visible via CUDA_VISIBLE_DEVICES below
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        model = networks_lpba40.miccai2018_net(vol_size,
                                               nf_enc,
                                               nf_dec,
                                               bidir=bidir)

        # load initial weights
        if load_model_file is not None and load_model_file != "":
            print("load file from {}".format(load_model_file))
            model.load_weights(load_model_file)

        # save first iteration
        model.save(os.path.join(model_dir, '%02d.h5' % initial_epoch))

        # compile
        flow_vol_shape = model.outputs[1].shape[1:-1]
        loss_class = losses.Miccai2018(image_sigma,
                                       prior_lambda,
                                       flow_vol_shape=flow_vol_shape)
        if bidir:
            model_losses = [
                loss_class.recon_loss, loss_class.recon_loss,
                loss_class.kl_loss
            ]
            loss_weights = [0.5, 0.5, 1]
        else:
            model_losses = [
                loss_class.recon_loss, loss_class.kl_loss,
                loss_class.kl_loss_1, loss_class.kl_loss_2,
                loss_class.kl_loss_3, loss_class.mse_loss, loss_class.mse_loss,
                loss_class.mse_loss
            ]
            loss_weights = [1, 1, 0.5, 0.5, 0.5, 1, 1, 1]

    # data generator
    nb_gpus = len(gpu_id.split(','))
    assert np.mod(batch_size, nb_gpus) == 0, \
        'batch_size should be a multiple of the nr. of gpus. ' + \
        'Got batch_size %d, %d gpus' % (batch_size, nb_gpus)

    train_example_gen = datagenerators_lpba40.example_gen_lpba40(
        train_vol_names, batch_size=batch_size, image_path=data_dir)
    miccai2018_gen_lpba40 = datagenerators_lpba40.miccai2018_gen_lpba40(
        train_example_gen, vol_size, batch_size=batch_size, bidir=bidir)

    # prepare callbacks
    save_file_name = os.path.join(model_dir, '{epoch:02d}.h5')

    # fit generator
    with tf.device(gpu):

        # multi-gpu support
        if nb_gpus > 1:
            save_callback = nrn_gen.ModelCheckpointParallel(save_file_name)
            mg_model = multi_gpu_model(model, gpus=nb_gpus)

        # single gpu
        else:
            save_callback = ModelCheckpoint(save_file_name)
            mg_model = model

        mg_model.compile(optimizer=Adam(lr=lr),
                         loss=model_losses,
                         loss_weights=loss_weights)
        mg_model.fit_generator(miccai2018_gen_lpba40,
                               initial_epoch=initial_epoch,
                               epochs=nb_epochs,
                               callbacks=[save_callback],
                               steps_per_epoch=steps_per_epoch,
                               verbose=1)
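
The file-filtering loop above relies on Python's for/else: the else branch runs only when the inner loop finishes without hitting break. A minimal standalone demonstration of the same pattern, with hypothetical filenames:

test_ids = list(range(31, 41))
files = ['l1_to_l5.hdr', 'l31_to_l2.hdr', 'l7_to_l40.hdr']
kept = []
for f in files:
    for c in test_ids:
        if 'l{}_'.format(c) in f or '_l{}.'.format(c) in f:
            break              # f involves a held-out test subject; skip it
    else:
        kept.append(f)         # no break occurred: keep for training
print(kept)                    # ['l1_to_l5.hdr']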
Example #6
def train(model_dir,
          gpu_id,
          lr,
          nb_epochs,
          prior_lambda,
          image_sigma,
          steps_per_epoch,
          batch_size,
          load_model_file,
          bidir,
          initial_epoch=0):
    """
    model training function
    :param model_dir: model folder to save to
    :param gpu_id: gpu id(s) to use, e.g. '0' or '0,1' (comma-separated string)
    :param lr: learning rate
    :param nb_epochs: number of training epochs
    :param prior_lambda: scalar in front of the smoothing Laplacian, from the MICCAI 2018 paper
    :param image_sigma: the image sigma from the MICCAI 2018 paper
    :param steps_per_epoch: gradient steps per epoch (a model checkpoint is saved after each epoch)
    :param batch_size: optional, defaults to 1; can be larger depending on GPU memory and volume size
    :param load_model_file: optional h5 model file to initialize with
    :param bidir: whether to use the bidirectional cost function
    """

    # Diffeomorphic network architecture used in VoxelMorph MICCAI 2018 paper
    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # gpu handling
    gpu = '/gpu:0'  # device 0 of the GPUs made visible via CUDA_VISIBLE_DEVICES below
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # prepare the model
    with tf.device(gpu):
        model = networks.Reg_Net(vol_size, nf_enc, nf_dec, bidir=bidir)  # note: vol_size is not defined here; assumed a module-level global
        flow = model.get_layer('flow-int').output
        [src, tgt] = model.inputs
        [y, flow_parameters] = model.outputs
        model = Model(inputs=[src, tgt], outputs=[y, flow_parameters, flow])
        # load initial weights
        if load_model_file is not None and load_model_file != "":
            model.load_weights(load_model_file)

        # save first iteration
        model.save(os.path.join(model_dir, '%02d.h5' % initial_epoch))

        # compile
        # note: best to supply vol_shape here than to let tf figure it out.
        flow_vol_shape = model.outputs[-2].shape[1:-1]
        loss_class = losses.Miccai2018(image_sigma,
                                       prior_lambda,
                                       flow_vol_shape=flow_vol_shape)
        if bidir:
            model_losses = [
                loss_class.recon_loss, loss_class.recon_loss,
                loss_class.kl_loss
            ]
            loss_weights = [0.5, 0.5, 1]
        else:
            model_losses = [
                loss_class.recon_loss, loss_class.kl_loss,
                losses_user.Sparse_Loss(
                    tissue=src,
                    loss_weights=1,
                ).loss
            ]
            loss_weights = [1, 1, 1]

    # data generator
    nb_gpus = len(gpu_id.split(','))
    assert np.mod(batch_size, nb_gpus) == 0, \
        'batch_size should be a multiple of the nr. of gpus. ' + \
        'Got batch_size %d, %d gpus' % (batch_size, nb_gpus)

    # note: train_vol_names is not defined in this function; assumed a module-level global
    data_gen = datagenerators.data_generator_vertices(train_vol_names,
                                                      vol_size)

    # prepare callbacks
    save_file_name = os.path.join(model_dir, '{epoch:02d}.h5')
    log_dir = "./logs/" + model_dir[9:] + datetime.datetime.now().strftime(
        "%Y%m%d-%H%M%S")
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir,
                                                       histogram_freq=0)
    # fit generator
    with tf.device(gpu):

        # multi-gpu support
        if nb_gpus > 1:
            save_callback = nrn_gen.ModelCheckpointParallel(save_file_name)
            mg_model = multi_gpu_model(model, gpus=nb_gpus)

        # single gpu
        else:
            save_callback = ModelCheckpoint(save_file_name)
            mg_model = model

        mg_model.compile(optimizer=Adam(lr=lr),
                         loss=model_losses,
                         loss_weights=loss_weights)
        mg_model.fit_generator(data_gen,
                               initial_epoch=initial_epoch,
                               epochs=nb_epochs,
                               callbacks=[save_callback, tensorboard_callback],
                               steps_per_epoch=steps_per_epoch,
                               verbose=1)
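
Example #6 wraps the network in a new Model so that an internal layer ('flow-int') becomes an extra output. A minimal sketch of that trick on a toy model (the layer names here are hypothetical):

from keras.layers import Input, Dense
from keras.models import Model

inp = Input((8,))
hidden = Dense(4, name='hidden')(inp)
out = Dense(2, name='out')(hidden)
base_model = Model(inp, out)

# expose the 'hidden' activations alongside the original output,
# mirroring how `flow` is appended to the model outputs above
wrapped = Model(inputs=base_model.inputs,
                outputs=base_model.outputs + [base_model.get_layer('hidden').output])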
Example #7
def register(gpu_id, moving, fixed, model_file, out_img, out_warp):
    """
    Register the moving image to the fixed image.
    """
    assert model_file, "A model file is necessary"
    assert out_img or out_warp, "output image or warp file needs to be specified"

    # GPU handling
    if gpu_id is not None:
        gpu = '/gpu:' + str(gpu_id)
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        set_session(tf.Session(config=config))
    else:
        gpu = '/cpu:0'

    # load data
    if_normalize_moving = True
    mov_nii = nib.load(moving)
    mov = mov_nii.get_data()[np.newaxis, ..., np.newaxis]
    if if_normalize_moving:
        # The CTs we used elsewhere are typically not clipped+normalized, which is required here.
        # Hard-coded range and normalization, the same as in "convert_niftis_to_npz.py".
        clip_range = [0, 100]  # HU
        mov_orig = mov.copy()  # copy, not an alias: the in-place clipping below would otherwise corrupt the logged input range
        mov[mov < clip_range[0]] = clip_range[0]
        mov[mov > clip_range[1]] = clip_range[1]
        mov = mov / np.max(mov)

    fix_nii = nib.load(fixed)
    fix = fix_nii.get_data()[np.newaxis, ..., np.newaxis]

    print('\n===\nLoading "moving" data (as in your input CT) from {}'.format(
        moving))
    print(' mov.shape:', mov.shape)
    print(' mov (input intensity range): [', np.amin(mov_orig), ',',
          np.amax(mov_orig), ']')
    print(' mov (intensity range, after clipping+normalization): [',
          np.amin(mov), ',', np.amax(mov), ']')
    print('Loading "fixed" data (as in your CT atlas/template) from {}'.format(
        fixed))
    print(' fix.shape:', fix.shape)
    print(' fix (intensity range): [', np.amin(fix), ',', np.amax(fix), ']')

    image_sigma = 0.02
    prior_lambda = 10

    flow_vol_shape = None  # Petteri: this was the default

    # PETTERI: not exactly sure why the shape needs to be 1/2 of the input shape;
    # likely because the velocity field is computed at half resolution
    # (cf. vel_resize=.5 in miccai2018_net in Example #4). TODO: check against the paper.
    flow_vol_shape = (fix.shape[1] // 2, fix.shape[2] // 2, fix.shape[3] // 2)

    with tf.device(gpu):
        # load model
        custom_objects = {
            'SpatialTransformer': nrn_layers.SpatialTransformer,
            'VecInt': nrn_layers.VecInt,
            'Sample': networks.Sample,
            'Rescale': networks.RescaleDouble,
            'Resize': networks.ResizeDouble,
            'Negate': networks.Negate,
            # the sigma/lambda values shouldn't matter for loading
            'recon_loss': losses.Miccai2018(image_sigma, prior_lambda,
                                            flow_vol_shape=flow_vol_shape).recon_loss,
            'kl_loss': losses.Miccai2018(image_sigma, prior_lambda,
                                         flow_vol_shape=flow_vol_shape).kl_loss,
        }

        print('Loading pre-trained model for registration from {}'.format(
            model_file))
        print('\n===\n')
        net = keras.models.load_model(model_file,
                                      custom_objects=custom_objects)

        # register
        [moved, warp] = net.predict([mov, fix])

    # output image
    if out_img is not None:
        img = nib.Nifti1Image(moved[0, ..., 0], mov_nii.affine)
        nib.save(img, out_img)

    # output warp
    if out_warp is not None:
        img = nib.Nifti1Image(warp[0, ...], mov_nii.affine)
        nib.save(img, out_warp)
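
The clipping-plus-normalization step in Example #7 can be pulled out into a standalone helper. A sketch (clip_and_normalize_ct is a hypothetical name; the [0, 100] HU window is the hard-coded range from the function above):

import numpy as np

def clip_and_normalize_ct(vol, clip_range=(0, 100)):
    # clip the CT volume to the HU window, then scale the result to [0, 1]
    out = np.clip(vol.astype(np.float32), clip_range[0], clip_range[1])
    return out / out.max()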