Example No. 1
def train(model_name, gpu_id):

    model_dir = '../models/' + model_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        model.compile(optimizer=Adam(lr=lr), loss=[
                      losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    for step in range(0, n_iterations):

        X = next(train_example_gen)[0]
        train_loss = model.train_on_batch(
            [X, atlas_vol], [atlas_vol, zero_flow])

        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        printLoss(step, 1, train_loss)

        if(step % model_save_iter == 0):
            model.save(model_dir + '/' + str(step) + '.h5')
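This example relies on module-level globals (vol_size, atlas_vol, train_vol_names, nf_enc, nf_dec, lr, reg_param, n_iterations, model_save_iter) defined outside the function. A minimal sketch of that setup, with illustrative values and a hypothetical atlas path:

import glob
import numpy as np

# Illustrative module-level setup assumed by the train() function above;
# paths, shapes, and hyper-parameters are placeholders, not the original values.
vol_size = (160, 192, 224)                 # 3D volume shape (D, H, W)
nf_enc = [16, 32, 32, 32]                  # U-Net encoder filters
nf_dec = [32, 32, 32, 32, 32, 16, 16, 3]   # U-Net decoder filters

# atlas volume reshaped to (1, D, H, W, 1) so it can be fed as a batch of one
atlas_vol = np.load('../data/atlas_norm.npz')['vol'][np.newaxis, ..., np.newaxis]

train_vol_names = glob.glob('../data/train/*.npz')   # hypothetical data layout

lr = 1e-4                # learning rate
reg_param = 1.0          # weight of the flow smoothness penalty
n_iterations = 150000    # number of training steps
model_save_iter = 5000   # save a checkpoint every N steps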
Example No. 2
def train(model, gpu_id, lr, n_iterations, reg_param, model_save_iter,
          load_iter):

    model_dir = '/home/ys895/MAS3_Models'
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # UNET filters
    nf_enc = [16, 32, 32, 32]
    if (model == 'vm1'):
        nf_dec = [32, 32, 32, 32, 8, 8, 3]
    else:
        nf_dec = [32, 32, 32, 32, 32, 16, 16, 3]

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        if (load_iter != 0):
            model.load_weights('/home/ys895/MAS3_Models/' + str(load_iter) +
                               '.h5')

        model.compile(optimizer=Adam(lr=lr),
                      loss=[losses.cc3D(),
                            losses.gradientLoss('l2')],
                      loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    # data generator that yields training volumes with an added batch dimension
    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    # training loop: the model configured above is now fed one batch per step
    for step in range(1, n_iterations + 1):
        # randomly choose one atlas from atlas_list
        rand_num = random.randint(0, list_num - 1)
        atlas_vol = atlas_list[rand_num]

        # training inputs: atlas_vol and X (training volume); targets: X and zero_flow
        X = next(train_example_gen)[0]
        train_loss = model.train_on_batch([atlas_vol, X], [X, zero_flow])

        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        printLoss(step, 1, train_loss)

        if (step % model_save_iter == 0):
            model.save(model_dir + '/' + str(load_iter + step) + '.h5')
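This example additionally assumes atlas_list (pre-loaded atlas volumes) and list_num globals, and it swaps the input order relative to Example No. 1, warping the atlas towards each training volume. A hypothetical construction of those globals:

import glob
import numpy as np

# Hypothetical construction of the atlas_list / list_num globals used above:
# each atlas is loaded and reshaped to (1, D, H, W, 1).
atlas_files = sorted(glob.glob('/home/ys895/atlases/*.npy'))   # placeholder path
atlas_list = [np.load(f)[np.newaxis, ..., np.newaxis] for f in atlas_files]
list_num = len(atlas_list)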
Example No. 3
def train(model,save_name, gpu_id, lr, n_iterations, reg_param, model_save_iter):

    model_dir = '../models/' + save_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))


    # UNET filters
    nf_enc = [16,32,32,32]
    if(model == 'vm1'):
        nf_dec = [32,32,32,32,8,8,3]
    else:
        nf_dec = [32,32,32,32,32,16,16,3]

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        model.compile(optimizer=Adam(lr=lr), loss=[losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    zeroflow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    
    for step in range(0, n_iterations):

        sub = np.load(train_pairs[step % (noftrain ** 2)][0])
        sub = np.reshape(sub, (1,) + sub.shape + (1,))
        tmp = np.load(train_pairs[step % (noftrain ** 2)][1])
        tmp = np.reshape(tmp, (1,) + tmp.shape + (1,))
        
        train_loss = model.train_on_batch([sub, tmp], [tmp, zeroflow])

        printLoss(step, train_loss, keras.get_value(model.optimizer.lr))

        if(step % model_save_iter == 0):
            model.save(model_dir + '/' + str(step) + '.h5')
        if(step % (2*(noftrain ** 2)) == 0 and step > 0):           
            keras.set_value(model.optimizer.lr, keras.get_value(model.optimizer.lr) / 2)
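This example indexes into a train_pairs list of (subject, template) file paths, cycling through it every noftrain ** 2 steps and halving the learning rate every two passes. A hypothetical construction of those globals:

import glob
import itertools

# Hypothetical construction of the train_pairs / noftrain globals used above:
# every ordered (subject, template) pair of the training volumes,
# giving noftrain ** 2 pairs in total.
train_files = sorted(glob.glob('../data/train/*.npy'))   # placeholder layout
noftrain = len(train_files)
train_pairs = list(itertools.product(train_files, train_files))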
Example No. 4
def train(model,save_name, gpu_id, lr, n_iterations, reg_param, model_save_iter):

    model_dir = '../models/' + save_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))


    # UNET filters
    nf_enc = [16,32,32,32]
    if(model == 'vm1'):
        nf_dec = [32,32,32,32,8,8,3]
    else:
        nf_dec = [32,32,32,32,32,16,16,3]

    with tf.device(gpu):
        model = networks.unet(vol_size, nf_enc, nf_dec)
        model.compile(optimizer=Adam(lr=lr), loss=[
                      losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')

    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    for step in range(0, n_iterations):

        X = next(train_example_gen)[0]
        train_loss = model.train_on_batch(
            [X, atlas_vol], [atlas_vol, zero_flow])

        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        printLoss(step, 1, train_loss)

        if(step % model_save_iter == 0):
            model.save(model_dir + '/' + str(step) + '.h5')
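All of these examples regularize the predicted flow with losses.gradientLoss('l2') against a zero_flow dummy target. A minimal sketch of what such an L2 spatial-gradient (smoothness) penalty typically computes on a dense 3D displacement field; this is an illustration, not the library's exact implementation:

import tensorflow as tf

def l2_grad_loss(_, y_pred):
    # y_pred: (batch, D, H, W, 3) dense displacement field;
    # penalize finite differences along each spatial axis.
    dz = y_pred[:, 1:, :, :, :] - y_pred[:, :-1, :, :, :]
    dy = y_pred[:, :, 1:, :, :] - y_pred[:, :, :-1, :, :]
    dx = y_pred[:, :, :, 1:, :] - y_pred[:, :, :, :-1, :]
    return (tf.reduce_mean(dz ** 2) +
            tf.reduce_mean(dy ** 2) +
            tf.reduce_mean(dx ** 2)) / 3.0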
Example No. 5
def train(model,
          pretrained_path,
          model_name,
          gpu_id,
          lr,
          n_iterations,
          use_mi,
          gamma,
          num_bins,
          patch_size,
          max_clip,
          reg_param,
          model_save_iter,
          local_mi,
          sigma_ratio,
          batch_size=1):
    """
    model training function
    :param model: either vm1 or vm2 (based on CVPR 2018 paper)
    :param model_dir: the model directory to save to
    :param gpu_id: integer specifying the gpu to use
    :param lr: learning rate
    :param n_iterations: number of training iterations
    :param reg_param: the smoothness/reconstruction tradeoff parameter (lambda in CVPR paper)
    :param model_save_iter: frequency with which to save models
    :param batch_size: Optional, default of 1. can be larger, depends on GPU memory and volume size
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    restrict_GPU_tf(str(gpu_id))
    restrict_GPU_keras(str(gpu_id))

    train_labels = sio.loadmat('../data/labels.mat')['labels'][0]
    n_labels = train_labels.shape[0]

    normalized_atlas_vol = atlas_vol / np.max(atlas_vol) * max_clip

    atlas_seg = datagenerators.split_seg_into_channels(seg, train_labels)
    atlas_seg = datagenerators.downsample(atlas_seg)

    model_dir = "../models/" + model_name
    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # GPU handling
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # UNET filters for voxelmorph-1 and voxelmorph-2,
    # these are architectures presented in CVPR 2018
    nf_enc = [16, 32, 32, 32]
    if model == 'vm1':
        nf_dec = [32, 32, 32, 32, 8, 8]
    else:
        nf_dec = [32, 32, 32, 32, 32, 16, 16]

    # prepare the model
    # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, flow]
    # in the experiments, we use image_2 as atlas

    bin_centers = np.linspace(0, max_clip, num_bins * 2 + 1)[1::2]
    loss_function = losses.mutualInformation(bin_centers,
                                             max_clip=max_clip,
                                             local_mi=local_mi,
                                             patch_size=patch_size,
                                             sigma_ratio=sigma_ratio)

    model = networks.cvpr2018_net(vol_size,
                                  nf_enc,
                                  nf_dec,
                                  use_seg=True,
                                  n_seg=len(train_labels))
    model.compile(optimizer=Adam(lr=lr),
                  loss=[
                      loss_function,
                      losses.gradientLoss('l2'),
                      sparse_categorical_crossentropy
                  ],
                  loss_weights=[1 if use_mi else 0, reg_param, gamma])

    # if you'd like to initialize the data, you can do it here:
    if pretrained_path is not None and pretrained_path != '':
        model.load_weights(pretrained_path)

    # prepare data for training
    train_example_gen = datagenerators.example_gen(train_vol_names,
                                                   return_segs=True,
                                                   seg_dir=train_seg_dir)
    zero_flow = np.zeros([batch_size, *vol_size, 3])

    # train. Note: we use train_on_batch and design our own print function, as this has enabled
    # faster development and debugging, but one could also use fit_generator and Keras callbacks.
    for step in range(0, n_iterations):

        # get data
        X = next(train_example_gen)
        X_seg = X[1]

        X_seg = datagenerators.split_seg_into_channels(X_seg, train_labels)
        X_seg = datagenerators.downsample(X_seg)

        # train
        train_loss = model.train_on_batch(
            [X[0], normalized_atlas_vol, X_seg],
            [normalized_atlas_vol, zero_flow, atlas_seg])
        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        # print the loss.
        print_loss(step, 1, train_loss)

        # save model
        if step % model_save_iter == 0:
            model.save(os.path.join(model_dir, str(step) + '.h5'))
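The mutual-information loss above places its bin centers at the midpoints of num_bins equal-width bins on [0, max_clip]. A small worked example of that expression with illustrative values:

import numpy as np

# Worked example of the bin-center computation used above (illustrative values).
max_clip, num_bins = 1.0, 4
edges_and_centers = np.linspace(0, max_clip, num_bins * 2 + 1)
# -> [0.   0.125 0.25  0.375 0.5   0.625 0.75  0.875 1.  ]
bin_centers = edges_and_centers[1::2]
# -> [0.125 0.375 0.625 0.875], one center per bin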
Example No. 6
def train(model,
          model_dir,
          gpu_id,
          lr,
          n_iterations,
          reg_param,
          model_save_iter,
          batch_size=1):
    """
    model training function
    :param model: either vm1 or vm2 (based on CVPR 2018 paper)
    :param model_dir: the model directory to save to
    :param gpu_id: integer specifying the gpu to use
    :param lr: learning rate
    :param n_iterations: number of training iterations
    :param reg_param: the smoothness/reconstruction tradeoff parameter (lambda in CVPR paper)
    :param model_save_iter: frequency with which to save models
    :param batch_size: Optional, default of 1. can be larger, depends on GPU memory and volume size
    """

    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    # GPU handling
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # UNET filters for voxelmorph-1 and voxelmorph-2,
    # these are architectures presented in CVPR 2018
    nf_enc = [16, 32, 32, 32]
    if model == 'vm1':
        nf_dec = [32, 32, 32, 32, 8, 8]
    else:
        nf_dec = [32, 32, 32, 32, 32, 16, 16]

    # prepare the model
    # in the CVPR layout, the model takes in [image_1, image_2] and outputs [warped_image_1, flow]
    # in the experiments, we use image_2 as atlas
    model = networks.unet(vol_size, nf_enc, nf_dec)
    model.compile(optimizer=Adam(lr=lr),
                  loss=[losses.cc3D(),
                        losses.gradientLoss('l2')],
                  loss_weights=[1.0, reg_param])

    # if you'd like to initialize the data, you can do it here:
    # model.load_weights(os.path.join(model_dir, '120000.h5'))

    # prepare data for training
    train_example_gen = datagenerators.example_gen(train_vol_names)
    zero_flow = np.zeros([batch_size, *vol_size, 3])

    # train. Note: we use train_on_batch and design our own print function, as this has enabled
    # faster development and debugging, but one could also use fit_generator and Keras callbacks.
    for step in range(0, n_iterations):

        # get data
        X = next(train_example_gen)[0]

        # train
        train_loss = model.train_on_batch([X, atlas_vol],
                                          [atlas_vol, zero_flow])
        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        # print the loss.
        print_loss(step, 1, train_loss)

        # save model
        if step % model_save_iter == 0:
            model.save(os.path.join(model_dir, str(step) + '.h5'))
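As the comment above notes, the same training could be driven by fit_generator and Keras callbacks instead of the manual train_on_batch loop. A minimal sketch of that alternative, assuming the same globals (train_example_gen, atlas_vol, zero_flow, n_iterations); the steps_per_epoch value is illustrative:

# Sketch of the fit_generator alternative mentioned in the comment above.
def atlas_pair_gen():
    # wrap the example generator so each yield pairs inputs with fixed targets
    while True:
        X = next(train_example_gen)[0]
        yield [X, atlas_vol], [atlas_vol, zero_flow]

model.fit_generator(atlas_pair_gen(),
                    steps_per_epoch=100,               # illustrative value
                    epochs=max(1, n_iterations // 100),
                    verbose=1)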
Example No. 7
    validation_list.append(val_files[ind[0]][:3] + val_files[ind[1]][:3])
    validation_list.append(val_files[ind[1]][:3] + val_files[ind[0]][:3])

gen_train = vol_generator2(datapath, train_list, batch_size)
gen_test = vol_generator2(datapath, validation_list, batch_size)

# training
#history = sdn_refine.fit_generator(gen_train, steps_per_epoch = len(train_list)/batch_size, epochs = epochs, use_multiprocessing = True, verbose=1, validation_data = gen_test, validation_steps = len(validation_list)/batch_size)
#loss = history.history['loss']
#val_loss = history.history['val_loss']

for i in range(3):
    set_trainable(sdn, True)
    print("Weights in sdn is trainable: {}".format(sdn.trainable))
    sdn.compile(
        loss=[losses.cc3D(), losses.gradientLoss('l2')],
        loss_weights=par['loss_weights'],
        #            metrics = [rec_img_loss, reg_grad],
        optimizer=Adam(lr=par['lr'], decay=1e-5))
    sdn.fit_generator(gen_train,
                      steps_per_epoch=len(train_list) // batch_size,
                      epochs=3,
                      use_multiprocessing=True,
                      verbose=1,
                      validation_data=gen_test,
                      validation_steps=len(validation_list) // batch_size)
    print('\n Round {} of sdn training done.\n'.format(i))
    set_trainable(sdn, False)
    print("Weights in sdn is trainable: {}".format(sdn.trainable))
    print("Weights in sdn model of sdn_refine is trainable: {}".format(
        sdn_refine.layers[-3].trainable))
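The set_trainable helper used above is not shown; a common sketch of such a helper toggles the trainable flag on the model and all of its layers (the model must then be recompiled, as the loop above does):

def set_trainable(model, trainable):
    # freeze or unfreeze every layer; takes effect after the next compile()
    model.trainable = trainable
    for layer in model.layers:
        layer.trainable = trainable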
Example No. 8
def train(model,save_name, gpu_id, lr, n_iterations, reg_param, model_save_iter, alpha):

    model_dir = '../models/' + save_name
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))


    # UNET filters
    nf_enc = [16,32,32,32]
    if(model == 'vm1'):
        nf_dec = [32,32,32,32,8,8,3]
    else:
        nf_dec = [32,32,32,32,32,16,16,3]
        
    nf = [0, 8, 16, 32, 64]

    with tf.device(gpu):
        deformer = networks.unet(vol_size, nf_enc, nf_dec)
        #deformer.compile(optimizer=Adam(lr=lr), loss=[losses.cc3D(), losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        # model.load_weights('../models/udrnet2/udrnet1_1/120000.h5')
        
        discriminator = networks.similarity_net(vol_size, nf)
        discriminator.compile(optimizer=Adam(lr), loss='binary_crossentropy')
        discriminator.trainable = False
        
        # Build GAN model
        src = Input(shape=vol_size + (1,))
        tgt = Input(shape=vol_size + (1,))
        [warp, df] = deformer([src,tgt])
        #pdb.set_trace()
        sim_p = discriminator([warp, tgt])

        GAN = Model([src, tgt], [sim_p, df])
        GAN.compile(optimizer=Adam(lr), loss=['binary_crossentropy',losses.gradientLoss('l2')], loss_weights=[1.0, reg_param])
        GAN.summary()
        
    sz = sim_p.shape.as_list()
    sz[0] = 1
    p_one = np.ones(sz)
    p_zero = np.zeros(sz)
    zeroflow = np.zeros((1, vol_size[0], vol_size[1], vol_size[2], 3))

    for step in range(0, n_iterations):
        print('Epoch ' + str(step // n_pairs) + ', Total iterations ' + str(step) + ', lr= ' + str(keras.get_value(discriminator.optimizer.lr)) + ':')
        
        sub = np.load(train_pairs[step % (n_pairs)][0])
        sub = np.reshape(sub, (1,) + sub.shape + (1,))
        tmp = np.load(train_pairs[step % (n_pairs)][1])
        tmp = np.reshape(tmp, (1,) + tmp.shape + (1,))
        
        ref_sub = np.load(ref_pairs[step % (n_ref)][0])
        ref_sub = np.reshape(ref_sub, (1,) + ref_sub.shape + (1,))
        ref_tmp = np.load(ref_pairs[step % (n_ref)][1])
        ref_tmp = np.reshape(ref_tmp, (1,) + ref_tmp.shape + (1,))
        
        
        ## ----------------  Train deformer --------------------------------##
        keras.set_value(GAN.optimizer.lr, lr)
        g_loss = GAN.train_on_batch([sub, tmp],[p_one, zeroflow]) 
        print('  Train deformer: ' + str(g_loss[1]))
        print('  Regularization: ' + str(g_loss[2]))
            
        
        ## ----------------  Train discriminator for reference --------------##
        fused = alpha * sub + (1-alpha) * tmp
        d_loss1 = discriminator.test_on_batch([fused, tmp], p_one)
        ## -------------------- Tricks for balancing the registration network and the discriminator --------------#
        if d_loss1 > 0.6:
            keras.set_value(discriminator.optimizer.lr, lr)
        elif d_loss1 > 0.4:
            keras.set_value(discriminator.optimizer.lr, lr * 0.1)
        elif d_loss1 > 0.2:
            keras.set_value(discriminator.optimizer.lr, lr * 0.01)
        else:
            keras.set_value(discriminator.optimizer.lr, lr * 0)
        d_loss1 = discriminator.train_on_batch([fused, tmp], p_one)
        print('  Test discriminator for positive sample: ' + str(d_loss1))
            
            
        ## ----------------  Train discriminator for deformer --------------##
        [warped, deform] = deformer.predict([sub, tmp])    
        d_loss0 = discriminator.test_on_batch([warped, tmp], p_zero)
        if d_loss0 > 0.6:
            keras.set_value(discriminator.optimizer.lr, lr)
        elif d_loss0 > 0.4:
            keras.set_value(discriminator.optimizer.lr, lr * 0.1)
        elif d_loss0 > 0.2:
            keras.set_value(discriminator.optimizer.lr, lr * 0.01)
        else:
            keras.set_value(discriminator.optimizer.lr, lr * 0)
        d_loss0 = discriminator.train_on_batch([warped, tmp], p_zero)
        print('  Test discriminator for negative sample: ' + str(d_loss0))

            
        if(step % model_save_iter == 0):
            deformer.save(model_dir + '/' + str(step) + '.h5')
            
        #if(step % (20 * n_pairs) == 0 and step > 0):           
            #lr = lr / 2
            #alpha = np.abs(alpha - 0.05)

        sys.stdout.flush()
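The same learning-rate gating ladder appears twice in the loop above, once per discriminator update. A small helper sketch that factors it out, with the thresholds copied from the example:

def gated_lr(base_lr, d_loss):
    # scale the discriminator learning rate down as its loss shrinks
    if d_loss > 0.6:
        return base_lr
    elif d_loss > 0.4:
        return base_lr * 0.1
    elif d_loss > 0.2:
        return base_lr * 0.01
    return 0.0

# usage: keras.set_value(discriminator.optimizer.lr, gated_lr(lr, d_loss1))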
Example No. 9
def Loss(yTrue, yPred):
    return losses.gradientLoss('l2')(
        yTrue, yPred) + losses.gradientLoss('l2')(0, backward_flow)
def reg_loss(y_true, y_pred):
    return gradientLoss('l2')(
        y_true, y_pred) + par['NJ loss'] * NJ_loss(y_true, y_pred)
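These composed losses close over externally defined objects (backward_flow, NJ_loss, par['NJ loss']) and would be passed to compile like the losses in the earlier examples. A sketch of that wiring; the model, optimizer, and weights here are illustrative:

# Sketch of how such composed losses are typically wired into compile.
model.compile(optimizer=Adam(lr=1e-4),
              loss=[Loss, reg_loss],
              loss_weights=[1.0, 1.0])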