Code example #1
def main(argv):

    # turn off log messages
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard')):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not os.path.exists(os.path.join(FLAGS.outdir, 'model')):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # save flag file
    FLAGS.flags_into_string()
    FLAGS.append_flags_into_file(os.path.join(FLAGS.outdir, 'flagfile.txt'))

    # get tfrecord list
    train_data_list = glob.glob(FLAGS.indir + '/*')
    # shuffle list
    random.shuffle(train_data_list)

    # load train data
    train_set = tf.data.Dataset.list_files(train_data_list)
    train_set = train_set.apply(
        tf.data.experimental.parallel_interleave(
            lambda x: tf.data.TFRecordDataset(x), cycle_length=os.cpu_count()))
    train_set = train_set.map(
        lambda x: utils._parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    train_set = train_set.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
    train_set = train_set.repeat()
    train_set = train_set.batch(FLAGS.batch_size)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'k_size': FLAGS.k_size,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'lr': FLAGS.lr,
            'is_training': True
        }

        Model = conditional_alphaGAN(**kwargs)

        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_e_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'e_loss'), sess.graph)
        writer_g_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'g_loss'))
        writer_d_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'd_loss'))
        writer_c_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'c_loss'))

        # saving loss operation
        value_loss = tf.Variable(0.0)
        tf.summary.scalar("loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        step_e, step_g, step_d, step_c = [], [], [], []

        # training
        tbar = tqdm(range(FLAGS.num_iteration), ascii=True)
        for step in tbar:
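            # alternating updates: e_g_step encoder/generator steps, then
            # d_step discriminator steps, then one code-discriminator step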
            for i in range(FLAGS.e_g_step):
                train_image_batch, points_batch = sess.run(train_data)
                noise = np.random.normal(
                    0., 1., size=[FLAGS.batch_size, FLAGS.latent_dim])
                e_loss = Model.update_e(train_image_batch, points_batch)
                g_loss = Model.update_g(train_image_batch, points_batch, noise)

            for i in range(FLAGS.d_step):
                d_loss = Model.update_d(train_image_batch, points_batch, noise)

            c_loss = Model.update_c(train_image_batch, points_batch, noise)

            step_e.append(e_loss)
            step_g.append(g_loss)
            step_d.append(d_loss)
            step_c.append(c_loss)

            if step % FLAGS.save_loss_step == 0:
                s = "e_loss: {:.4f}, g_loss: {:.4f}, d_loss: {:.4f}, c_loss: {:.4f}".format(
                    np.mean(step_e), np.mean(step_g), np.mean(step_d),
                    np.mean(step_c))
                tbar.set_description(s)

                summary_e = sess.run(merge_op, {value_loss: np.mean(step_e)})
                summary_g = sess.run(merge_op, {value_loss: np.mean(step_g)})
                summary_d = sess.run(merge_op, {value_loss: np.mean(step_d)})
                summary_c = sess.run(merge_op, {value_loss: np.mean(step_c)})

                writer_e_loss.add_summary(summary_e, step)
                writer_g_loss.add_summary(summary_g, step)
                writer_d_loss.add_summary(summary_d, step)
                writer_c_loss.add_summary(summary_c, step)

                step_e.clear()
                step_g.clear()
                step_d.clear()
                step_c.clear()

            if step % FLAGS.save_model_step == 0:
                # save model
                Model.save_model(FLAGS.outdir, step)
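
The training pipeline above decodes each TFRecord through utils._parse_function, which is not included in these listings. A minimal sketch of what such a parser might look like, assuming each record stores serialized float32 volumes under hypothetical 'img_raw' and 'points_raw' keys (the real keys and shapes live in the project's utils module):

import tensorflow as tf

def _parse_function(example_proto, image_size):
    # hypothetical feature keys; the actual schema is defined where the
    # TFRecords are written. image_size is assumed to be a (D, H, W) tuple.
    features = {
        'img_raw': tf.io.FixedLenFeature([], tf.string),
        'points_raw': tf.io.FixedLenFeature([], tf.string),
    }
    parsed = tf.io.parse_single_example(example_proto, features)
    image = tf.reshape(tf.io.decode_raw(parsed['img_raw'], tf.float32),
                       list(image_size) + [1])
    points = tf.reshape(tf.io.decode_raw(parsed['points_raw'], tf.float32),
                        list(image_size) + [1])
    return image, points
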
Code example #2
def main(argv):

    # turn off log messages
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model dirctory is not existed!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'dice')):
        os.makedirs(os.path.join(FLAGS.dir, 'dice'))

    # get ground truth list
    ground_truth_list = io.load_list(FLAGS.ground_truth)

    # load ground truth
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    test_set = test_set.repeat()
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        dice_list = []

        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))
        for i in range(FLAGS.num_of_test):
            _, test_points_batch, _ = sess.run(test_data)

            np.random.seed(4)

            tbar = tqdm(range(FLAGS.num_of_generate // FLAGS.batch_size),
                        ascii=True)
            for j in tbar:

                z = np.random.normal(0.,
                                     1.,
                                     size=[FLAGS.batch_size, FLAGS.latent_dim])
                # z = utils.truncated_noise_sample(FLAGS.batch_size, FLAGS.latent_dim, truncation=2.0)
                generate_batch = Model.generate_sample(z, test_points_batch)

                # save logodds
                generate_batch_ = np.asarray(generate_batch)
                generate_batch_ = generate_batch_[0, :, :, :]
                for image_index in range(generate_batch_.shape[0]):
                    gen = generate_batch_[image_index][:, :, :, 0]
                    io.write_mhd_and_raw(
                        gen,
                        '{}.mhd'.format(
                            os.path.join(
                                FLAGS.dir, 'dice', '{}'.format(i),
                                '{}'.format(j * FLAGS.batch_size +
                                            image_index))),
                        spacing=[1, 1, 1],
                        origin=[0, 0, 0],
                        compress=True)

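                # accumulate per-voxel label counts over all generated samples;
                # normalized below into a probabilistic atlas (pa)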
                if j == 0:
                    data = np.asarray(generate_batch)[0]
                    label = np.where(data > 0.5, 0, 1)
                    label = label.astype(np.int8)
                    pa = np.sum(label, axis=0)
                else:
                    data = np.asarray(generate_batch)[0]
                    label_ = np.where(data > 0.5, 0, 1)
                    label_ = label_.astype(np.int8)
                    pa = pa + np.sum(label_, axis=0)

            pa = pa / float(FLAGS.num_of_generate)
            pa = pa.astype(np.float32)

            # output image
            io.write_mhd_and_raw(pa,
                                 '{}_{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'dice', 'PA'), i),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)

            # dice
            gt = ground_truth[i]
            gt = gt.astype(np.float32)
            dice = utils.dice_coef(gt, pa)
            dice_list.append([round(dice, 6)])
            print(dice)

        print('dice = %f' % np.mean(dice_list))
        # write csv
        io.write_csv(
            dice_list,
            os.path.join(FLAGS.dir, 'dice',
                         'dice_{}.csv'.format(FLAGS.model_index)), 'dice')
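
utils.dice_coef, used above to compare each ground-truth label with the probabilistic atlas, is not shown in the listing. A minimal sketch of a soft Dice coefficient over two same-shaped float volumes (an assumption about its behaviour, not the project's exact implementation):

import numpy as np

def dice_coef(a, b, eps=1e-7):
    # soft Dice: 2|A.B| / (|A| + |B|), with eps to avoid division by zero
    a = a.astype(np.float32).ravel()
    b = b.astype(np.float32).ravel()
    intersection = np.sum(a * b)
    return (2. * intersection + eps) / (np.sum(a) + np.sum(b) + eps)
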
Code example #3
def main(argv):

    # turn off log messages
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model dirctory is not existed!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'specificity')):
        os.makedirs(os.path.join(FLAGS.dir, 'specificity'))

    # load ground truth
    ground_truth_list = glob.glob(FLAGS.ground_truth + '/*.mhd')
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    # test_set = test_set.shuffle(buffer_size=FLAGS.num_of_test)
    test_set = test_set.repeat()
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))

        tbar = tqdm(range(FLAGS.num_of_generate // FLAGS.batch_size),
                    ascii=True)
        for i in tbar:
            np.random.seed(4)

            z = np.random.normal(0.,
                                 1.,
                                 size=[FLAGS.batch_size, FLAGS.latent_dim])

            _, test_points_batch, _ = sess.run(test_data)
            generate_batch = Model.generate_sample(z, test_points_batch)

            # dilation of points
            test_points_dilate = tf.keras.layers.MaxPooling3D(
                pool_size=3, strides=1, padding='same')(test_points_batch)
            test_points_dilate = test_points_dilate.eval()
            test_points_dilate = test_points_dilate * 2  # scaling

            if i == 0:
                samples = np.asarray(generate_batch)[0]
                points = np.asarray(test_points_dilate)
            else:
                samples = np.concatenate(
                    (samples, np.asarray(generate_batch)[0]), axis=0)
                points = np.concatenate(
                    (points, np.asarray(test_points_dilate)), axis=0)

        # calculate Jaccard Index and output images
        specificity = []
        tbar = tqdm(range(samples.shape[0]), ascii=True)
        for i in tbar:
            gen = samples[i][:, :, :, 0]
            points_single = points[i][:, :, :, 0]

            # label
            gen_label = np.where(gen > 0.5, 0, 1)

            # calculate ji
            case_max_ji = 0.
            for image_index in range(ground_truth.shape[0]):
                ji = utils.jaccard(gen_label, ground_truth[image_index])
                if ji > case_max_ji:
                    case_max_ji = ji
            specificity.append([case_max_ji])

            # label and points
            label_and_points = gen_label + points_single

            gen_label = gen_label.astype(np.int8)
            label_and_points = label_and_points.astype(np.int8)

            # output image
            io.write_mhd_and_raw(gen,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'logodds',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(gen_label,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'label',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(label_and_points,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'label_and_points',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
        print('specificity = %f' % np.mean(specificity))

        # write csv
        io.write_csv(
            specificity,
            os.path.join(FLAGS.dir, 'specificity_shape',
                         'specificity_{}.csv'.format(FLAGS.model_index)),
            'specificity')
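
utils.jaccard, used here to score each generated label against every ground-truth case, is likewise not included. A minimal sketch of a Jaccard index (IoU) between two binary volumes, assuming 0/1 inputs:

import numpy as np

def jaccard(a, b, eps=1e-7):
    # Jaccard index: |A and B| / |A or B|
    a = a.astype(bool)
    b = b.astype(bool)
    intersection = np.logical_and(a, b).sum()
    union = np.logical_or(a, b).sum()
    return intersection / (union + eps)
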
Code example #4
def main(argv):

    # turn off log messages
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model dirctory is not existed!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'generalization')):
        os.makedirs(os.path.join(FLAGS.dir, 'generalization'))

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # test step
    test_step = FLAGS.num_of_test // FLAGS.batch_size
    if FLAGS.num_of_test % FLAGS.batch_size != 0:
        test_step += 1

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))
        tbar = tqdm(range(test_step), ascii=True)
        for i in tbar:
            test_image_batch, test_points_batch, test_label_batch = sess.run(
                test_data)
            reconstruction_batch = Model.reconstruction(
                test_image_batch, test_points_batch)

            # dilation of points
            test_points_batch = tf.keras.layers.MaxPooling3D(
                pool_size=5, strides=1, padding='same')(test_points_batch)
            test_points_batch = test_points_batch.eval()
            test_points_batch = test_points_batch * 2  # scaling

            if i == 0:
                test_label = np.asarray(test_label_batch)
                reconstruction = np.asarray(reconstruction_batch)[0]
                points = np.asarray(test_points_batch)
            else:
                test_label = np.concatenate(
                    (test_label, np.asarray(test_label_batch)), axis=0)
                reconstruction = np.concatenate(
                    (reconstruction, np.asarray(reconstruction_batch)[0]),
                    axis=0)
                points = np.concatenate((points, np.array(test_points_batch)),
                                        axis=0)

        # calculate Jaccard Index and output images
        generalization = []
        tbar = tqdm(range(reconstruction.shape[0]), ascii=True)
        for i in tbar:
            test_label_single = test_label[i][:, :, :, 0]
            reconstruction_single = reconstruction[i][:, :, :, 0]
            points_single = points[i][:, :, :, 0]

            # label
            rec_label = np.where(reconstruction_single > 0.5, 0, 1)
            rec_label = rec_label.astype(np.int8)

            # calculate ji
            generalization.append(
                [utils.jaccard(rec_label, test_label_single)])

            # label and points
            label_and_points = rec_label + points_single

            rec_label = rec_label.astype(np.int8)
            label_and_points = label_and_points.astype(np.int8)

            # output image
            io.write_mhd_and_raw(reconstruction_single,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'logodds',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(rec_label,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'predict',
                                                  'recon_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(label_and_points,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'label_and_points',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)

        print('generalization = %f' % np.mean(generalization))

        # write csv
        io.write_csv(
            generalization,
            os.path.join(FLAGS.dir, 'generalization',
                         'generalization_val_{}.csv'.format(
                             FLAGS.model_index)), 'generalization')
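
Examples #3 and #4 dilate the point map by pushing the NumPy batch through a stride-1 MaxPooling3D layer and calling .eval(), which adds new graph ops on every loop iteration. The same dilation can be done outside the graph; the sketch below uses scipy.ndimage.maximum_filter, which matches a pool_size=k, stride-1, 'same'-padded max pooling up to boundary handling (for a non-negative point map the result at the border is the same):

import numpy as np
from scipy import ndimage

def dilate_points(points_batch, k=5):
    # points_batch: (batch, D, H, W, C) array; apply a k^3 max filter per volume
    dilated = np.empty_like(points_batch)
    for b in range(points_batch.shape[0]):
        for c in range(points_batch.shape[-1]):
            dilated[b, ..., c] = ndimage.maximum_filter(
                points_batch[b, ..., c], size=k, mode='constant', cval=0.0)
    return dilated * 2  # same scaling factor as in the listings
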
Code example #5
def main(argv):

    # turn off log messages
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model directory does not exist!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'tensorboard')):
        os.makedirs(os.path.join(FLAGS.dir, 'tensorboard'))

    # get tfrecord list
    val_data_list = glob.glob(FLAGS.indir + '/*')

    # get ground truth list
    ground_truth_list = glob.glob(FLAGS.ground_truth + '/*.mhd')

    # load ground truth
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # number of saved models
    num_of_model = FLAGS.train_iteration // FLAGS.save_model_step
    num_of_model = (num_of_model + 1
                    if FLAGS.train_iteration % FLAGS.save_model_step != 0
                    else num_of_model - 1)

    # val_iter
    num_val_iter = FLAGS.num_of_val // FLAGS.batch_size
    if FLAGS.num_of_val % FLAGS.batch_size != 0:
        num_val_iter += 1

    # load val data
    val_set = tf.data.Dataset.list_files(val_data_list)
    val_set = val_set.apply(
        tf.data.experimental.parallel_interleave(
            lambda x: tf.data.TFRecordDataset(x), cycle_length=os.cpu_count()))
    val_set = val_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                          num_parallel_calls=os.cpu_count())
    val_set = val_set.repeat()
    val_set = val_set.batch(FLAGS.batch_size)
    val_iter = val_set.make_one_shot_iterator()
    val_data = val_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_gen = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_generalization'))
        writer_spe = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_specificity'))
        writer_val_ls = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_ls'))
        writer_val_eikonal = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_eikonal'))
        writer_all = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard',
                         'val_all'))  # mean of generalization and specificity

        # saving loss operation
        value_loss = tf.Variable(0.0)
        tf.summary.scalar("evaluation", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        # validation
        tbar = tqdm(range(num_of_model), ascii=True)
        for step in tbar:
            Model.restore_model(
                FLAGS.dir +
                '/model/model_{}'.format(step * FLAGS.save_model_step))

            generalization, specificity, val_ls, val_eikonal = [], [], [], []
            points = []
            real_image = []

            # reconstruction
            for i in range(num_val_iter):
                val_image_batch, val_points_batch, val_label_batch = sess.run(
                    val_data)
                points.append(val_points_batch)
                real_image.append(val_image_batch)

                reconstruction_batch = Model.reconstruction(
                    val_image_batch, val_points_batch)

                if i == 0:
                    val_label = np.asarray(val_label_batch)
                    reconstruction = np.asarray(reconstruction_batch)[0]
                else:
                    val_label = np.concatenate(
                        (val_label, np.asarray(val_label_batch)), axis=0)
                    reconstruction = np.concatenate(
                        (reconstruction, np.asarray(reconstruction_batch)[0]),
                        axis=0)

            # calculate generalization
            for i in range(reconstruction.shape[0]):
                val_label_single = val_label[i][:, :, :, 0]
                reconstruction_single = reconstruction[i][:, :, :, 0]

                # label
                rec_label = np.where(reconstruction_single > 0.5, 0, 1)

                # calculate ji
                generalization.append(
                    [utils.jaccard(rec_label, val_label_single)])

            # samples from latent space
            points_ls = np.ones_like(points) * 0.5
            for i in range(FLAGS.num_of_generate // FLAGS.batch_size):
                shuffle_fun(points_ls, points)

                z = np.random.normal(0.,
                                     1.,
                                     size=[FLAGS.batch_size, FLAGS.latent_dim])
                generate_batch, level_set_loss, eikonal_loss = Model.validation_specificity(
                    points_ls[random.randint(0, num_val_iter - 1)], z,
                    points[random.randint(0, num_val_iter - 1)])

                val_ls.append(level_set_loss)
                val_eikonal.append(eikonal_loss)

                if i == 0:
                    samples = np.asarray(generate_batch)
                else:
                    samples = np.concatenate(
                        (samples, np.asarray(generate_batch)), axis=0)

            # calculate specificity
            for i in range(samples.shape[0]):
                gen = samples[i][:, :, :, 0]

                # label
                gen_label = np.where(gen > 0.5, 0, 1)

                # calculate ji
                case_max_ji = 0.
                for image_index in range(ground_truth.shape[0]):
                    ji = utils.jaccard(gen_label, ground_truth[image_index])
                    if ji > case_max_ji:
                        case_max_ji = ji
                specificity.append([case_max_ji])

            s = "val_generalization: {:.4f}, val_specificity: {:.4f}, ls: {:.4f}, eikonal: {:.4f}, mean: {:.4f}".format(
                np.mean(generalization), np.mean(specificity), np.mean(val_ls),
                np.mean(val_eikonal),
                (np.mean(generalization) + np.mean(specificity)) / 2.)

            tbar.set_description(s)

            summary_gen = sess.run(merge_op,
                                   {value_loss: np.mean(generalization)})
            summary_spe = sess.run(merge_op,
                                   {value_loss: np.mean(specificity)})
            summary_ls = sess.run(merge_op, {value_loss: np.mean(val_ls)})
            summary_eikonal = sess.run(merge_op,
                                       {value_loss: np.mean(val_eikonal)})
            summary_all = sess.run(merge_op, {
                value_loss:
                (np.mean(generalization) + np.mean(specificity)) / 2.
            })

            writer_gen.add_summary(summary_gen, step * FLAGS.save_model_step)
            writer_spe.add_summary(summary_spe, step * FLAGS.save_model_step)
            writer_val_ls.add_summary(summary_ls, step * FLAGS.save_model_step)
            writer_val_eikonal.add_summary(summary_eikonal,
                                           step * FLAGS.save_model_step)
            writer_all.add_summary(summary_all, step * FLAGS.save_model_step)

            generalization.clear()
            specificity.clear()
            val_ls.clear()
            val_eikonal.clear()
            points.clear()
            real_image.clear()