Example #1
vis = VIS(save_path=opt.load_from_checkpoint)

# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# define data loader
img_shape = [opt.imSize, opt.imSize]
label_classes = vis.palette_info()
print('label classes:')
print(label_classes)
test_generator, test_samples = dataLoader(opt.data_path + '/val/',
                                          1,
                                          img_shape,
                                          label_classes,
                                          train_mode=False)
# define model, the last dimension is the channel
label = tf.placeholder(tf.float32,
                       shape=[None] + img_shape + [len(label_classes)])
# define model
with tf.name_scope('network'):
    modelFN = modelFns["crfunet"]
    model = modelFN(opt.num_class, img_shape=img_shape + [3])
    img = model.input
    pred = model.output
# define loss
with tf.name_scope('cross_entropy'):
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=pred))
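
The label placeholder here is float32 with one channel per class, so the loader has to deliver one-hot masks. If the ground truth were an integer index mask instead, a conversion along these lines would be needed (a sketch only; label_int is a hypothetical name, not part of the original code):

# Sketch (assumption): turn an integer mask of shape [N, H, W] into the
# one-hot layout [N, H, W, num_classes] that the float32 label placeholder expects.
label_int = tf.placeholder(tf.int32, shape=[None] + img_shape)
label_onehot = tf.one_hot(label_int, depth=len(label_classes), dtype=tf.float32)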
Example #2
modelFns = {  # assumption: dict name taken from its use in Example #1
    'crfunet': Models.CRFunet.CRFunet
}

# save and compute metrics
vis = VIS(save_path=opt.checkpoint_path)

# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
''' Users define data loader (with train and test) '''
img_shape = [opt.imSize, opt.imSize]
label_classes = vis.palette_info()
train_generator, train_samples = dataLoader(opt.data_path + '/train/',
                                            opt.batch_size,
                                            img_shape,
                                            label_classes,
                                            mean=dataset_mean,
                                            std=dataset_std)
test_generator, test_samples = dataLoader(opt.data_path + '/val/',
                                          1,
                                          img_shape,
                                          label_classes,
                                          train_mode=False,
                                          mean=dataset_mean,
                                          std=dataset_std)

opt.iter_epoch = int(train_samples)
# define input holders
label = tf.placeholder(tf.float32,
                       shape=[None] + img_shape + [len(label_classes)])
weight_map = tf.placeholder(tf.float32, shape=img_shape)
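
The weight_map placeholder hints at per-pixel loss weighting. One common way to wire it up, assuming the model output pred is defined as in Example #1, is sketched below (not the original code):

# Sketch (assumption): weight the per-pixel cross-entropy by weight_map
# (shape [H, W], broadcast over the batch) before averaging.
with tf.name_scope('weighted_cross_entropy'):
    per_pixel_loss = tf.nn.softmax_cross_entropy_with_logits(labels=label,
                                                             logits=pred)
    weighted_loss = tf.reduce_mean(per_pixel_loss * weight_map)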
Example #3
from opts import dataset_mean, dataset_std  # set them in opts

CHECKPOINT = os.getcwd() + "\\checkpoints"

# save and compute metrics
vis = VIS(save_path=CHECKPOINT)

# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
''' Users define data loader (with train and test) '''
img_shape = [opt.imSize, opt.imSize, opt.num_channels]
train_generator, train_samples = dataLoader(os.getcwd() + '\\train\\',
                                            opt.batch_size,
                                            img_shape,
                                            mean=dataset_mean,
                                            std=dataset_std)
test_generator, test_samples = dataLoader(os.getcwd() + '\\test\\',
                                          1,
                                          img_shape,
                                          train_mode=False,
                                          mean=dataset_mean,
                                          std=dataset_std)
# test_generator, test_samples = dataLoader(opt.data_path+'/test2/', 1,  img_shape, train_mode=False,mean=dataset_mean, std=dataset_std)

if opt.iter_epoch == 0:
    opt.iter_epoch = int(train_samples)
# define input holders
label = tf.placeholder(tf.int32, shape=[None] + img_shape[:-1])
# define model
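# (Sketch, not part of the original snippet: judging by Example #6, the model
#  definition presumably continues roughly as follows.)
with tf.name_scope('unet'):
    model = UNet().create_model(img_shape=img_shape, num_class=opt.num_class)
    img = model.input
    pred = model.output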
Example #4
# configure args
from opts import *

# save and compute metrics
vis = VIS(save_path=opt.checkpoint_path)

# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
K.set_learning_phase(1)

# define data loader (with train and test)
train_generator, _, train_samples, _ = dataLoader(opt.data_path,
                                                  opt.batch_size, opt.imSizeX,
                                                  opt.imSizeY)

# define test loader (optional to replace above test_generator)
test_generator, test_samples = folderLoader(opt.data_path, opt.imSizeX,
                                            opt.imSizeY)
opt.iter_epoch = int(train_samples)

# define input holders
img_shape = (opt.imSizeY, opt.imSizeX, 3)
#img = tf.placeholder(tf.float32, shape=img_shape)
img = tf.placeholder(tf.int32, shape=img_shape)
label = tf.placeholder(tf.int32, shape=(None, opt.imSizeY, opt.imSizeX))

# define model
with tf.name_scope('unet'):
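    # (Sketch, not part of the original snippet: following Example #5, the
    #  scope presumably builds the UNet here; taking model.input would then
    #  replace the img placeholder defined above.)
    model = UNet().create_model(img_shape=img_shape, num_class=opt.num_class)
    img = model.input
    pred = model.output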
Example #5
from utils import VIS, mean_IU
# configure args
from opts import *

# save and compute metrics
vis = VIS(save_path=opt.checkpoint_path)

# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# define data loader (with train and test)
img_shape = [opt.imHeight, opt.imWidth]
#img_shape = [512, 512]
train_generator, train_samples = dataLoader(opt.data_path + '/',
                                            opt.batch_size, img_shape)
test_generator, test_samples = dataLoader(opt.data_path + '/val/',
                                          1,
                                          img_shape,
                                          train_mode=False)
# define test loader (optional to replace above test_generator)
# test_generator, test_samples = folderLoader(opt.data_path, imSize=(opt.imSize,opt.imSize))
opt.iter_epoch = int(train_samples)
# define input holders
label = tf.placeholder(tf.int32, shape=[None] + img_shape)
# define model
with tf.name_scope('unet'):
    model = UNet().create_model(img_shape=img_shape + [3],
                                num_class=opt.num_class)
    img = model.input
    pred = model.output
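
The label placeholder in this example is an int32 class-index mask, so the matching loss is the sparse variant (shown commented out in Examples #6 and #7; sketched here under that assumption):

# Sketch (assumption): sparse cross-entropy over per-pixel class indices,
# matching the int32 label placeholder above.
with tf.name_scope('cross_entropy'):
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,
                                                       logits=pred))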
Example #6
from opts import *
from opts import dataset_mean, dataset_std  # set them in opts

opt.data_path = os.getcwd()

vis = VIS(save_path=opt.load_from_checkpoint)

# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# define data loader
img_shape = [opt.imSize, opt.imSize, opt.num_channels]
test_generator, test_samples = dataLoader(opt.data_path + '\\test\\',
                                          1,
                                          img_shape,
                                          train_mode=False)
# test_generator, test_samples = dataLoader(opt.data_path+'/train/', 1,  img_shape, train_mode=False)
# define model, the last dimension is the channel
label = tf.placeholder(tf.int32, shape=[None] + img_shape[:-1])
with tf.name_scope('unet'):
    # model = UNet().create_model(img_shape=img_shape+[3], num_class=opt.num_class)
    model = UNet().create_model(img_shape=img_shape, num_class=opt.num_class)
    img = model.input
    pred = model.output
# define loss
# with tf.name_scope('cross_entropy'):
#     cross_entropy_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=pred))
with tf.name_scope('dice_loss'):
    dice_loss = dice_coef_loss(label, pred)
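
dice_coef_loss comes from the project's own utilities and is not shown in this snippet. A typical soft-Dice formulation looks like the sketch below; the real implementation may differ (the name dice_coef_loss_sketch is hypothetical):

# Sketch (assumption): a common soft-Dice loss; the project's actual
# dice_coef_loss may differ in smoothing and axis handling.
def dice_coef_loss_sketch(y_true, y_pred, smooth=1.0):
    y_true_f = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    dice = (2.0 * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
    return 1.0 - dice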
Example #7
def get_roi(dir_path):
    '''
    Extract the brain ROI from MRI scans of fetuses.
    :param dir_path: directory containing the .npz files that are fed to the network.
    :return: list of tuples of numpy arrays: (img, predicted label map, gt)
    '''

    vis = VIS(save_path=opt.load_from_checkpoint)

    # configuration session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # define data loader
    img_shape = [opt.imSize, opt.imSize, opt.num_channels]
    test_generator, test_samples = dataLoader(dir_path,
                                              1,
                                              img_shape,
                                              train_mode=False)

    # define model, the last dimension is the channel
    label = tf.placeholder(tf.int32, shape=[None] + img_shape[:-1])
    with tf.name_scope('unet'):
        # model = UNet().create_model(img_shape=img_shape+[3], num_class=opt.num_class)
        model = UNet().create_model(img_shape=img_shape,
                                    num_class=opt.num_class)
        img = model.input
        pred = model.output
    # define loss
    # with tf.name_scope('cross_entropy'):
    #     cross_entropy_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=pred))
    with tf.name_scope('dice_loss'):
        dice_loss = dice_coef_loss(label, pred)

    saver = tf.train.Saver()  # must be created after the model so it captures all variables
    ''' Main '''
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    with sess.as_default():
        # restore from a checkpoint if exists
        try:
            last_checkpoint = tf.train.latest_checkpoint(opt.checkpoint_path)
            # saver.restore(sess, opt.load_from_checkpoint)
            print('--> load from checkpoint ' + last_checkpoint)
            saver.restore(sess, last_checkpoint)
        except Exception as ex:
            print('unable to load checkpoint ... {}'.format(ex))
            print('tried loading from: {}'.format(opt.checkpoint_path))
            sys.exit(1)
        dice_score = 0

        # tuples of (img, predicted label map, ground truth)
        results = []

        for it in range(0, test_samples):
            x_batch, y_batch = next(test_generator)
            # feed the image batch and its ground-truth labels into the graph
            feed_dict = {img: x_batch, label: y_batch}
            # loss, pred_logits = sess.run([cross_entropy_loss, pred], feed_dict=feed_dict)
            # pred_map = np.argmax(pred_logits[0], axis=2)
            loss, pred_logits = sess.run([dice_loss, pred],
                                         feed_dict=feed_dict)
            pred_map_batch = pred_logits > 0.5
            pred_map = pred_map_batch.squeeze()
            score = vis.add_sample(pred_map, y_batch[0])

            im, gt = deprocess(x_batch[0], dataset_mean, dataset_std,
                               y_batch[0])
            results.append((im, pred_map, gt))

            # vis.save_seg(pred_map, name='{0:04d}_{1:.3f}.png'.format(it, score), im=im, gt=gt)
            # print ('[iter %f]: loss=%f, meanIU=%f' % (it, loss, score))

        vis.compute_scores()
        return results
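
Going by the docstring, get_roi is called on a directory of .npz volumes, roughly like this (the path is a placeholder, not from the original code):

# Hypothetical usage; the directory path is an assumption.
rois = get_roi('./data/fetal_mri_npz')
for im, pred_map, gt in rois:
    print(im.shape, pred_map.shape, gt.shape)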