Example 1
def main():

    # tf flag
    flags = tf.flags
    flags.DEFINE_string("train_data_txt", "./train.txt", "train data txt")
    flags.DEFINE_string("val_data_txt", "./val.txt", "validation data txt")
    flags.DEFINE_string("outdir", "./output/", "outdir")
    flags.DEFINE_float("beta", 1, "hyperparameter beta")
    flags.DEFINE_integer("num_of_val", 600, "number of validation data")
    flags.DEFINE_integer("batch_size", 30, "batch size")
    flags.DEFINE_integer("num_iteration", 500001, "number of iteration")
    flags.DEFINE_integer("save_loss_step", 200, "step of save loss")
    flags.DEFINE_integer("save_model_step", 500,
                         "step of save model and validation")
    flags.DEFINE_integer("shuffle_buffer_size", 1000, "buffer size of shuffle")
    flags.DEFINE_integer("latent_dim", 6, "latent dim")
    flags.DEFINE_list("image_size", [9 * 9 * 9], "image size")
    flags.DEFINE_string("model", './model/model_{}', "pre training model1")
    flags.DEFINE_string("model2", './model/model_{}', "pre training model2")
    flags.DEFINE_boolean("is_n1_opt", True, "n1_opt")
    FLAGS = flags.FLAGS

    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard',
                                        'train'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard', 'train'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard', 'val'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard', 'val'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard', 'rec'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard', 'rec'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard', 'kl'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard', 'kl'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'model'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # read list
    train_data_list = io.load_list(FLAGS.train_data_txt)
    val_data_list = io.load_list(FLAGS.val_data_txt)

    # shuffle list
    random.shuffle(train_data_list)
    # val step
    val_step = FLAGS.num_of_val // FLAGS.batch_size
    if FLAGS.num_of_val % FLAGS.batch_size != 0:
        val_step += 1

    # load train data and validation data
    train_set = tf.data.Dataset.list_files(train_data_list)
    train_set = train_set.apply(
        tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset,
                                            cycle_length=6))
    # train_set = tf.data.TFRecordDataset(train_data_list)
    train_set = train_set.map(
        lambda x: _parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    train_set = train_set.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
    train_set = train_set.repeat()
    train_set = train_set.batch(FLAGS.batch_size)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    val_set = tf.data.Dataset.list_files(val_data_list)
    val_set = val_set.apply(
        tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset,
                                            cycle_length=os.cpu_count()))
    # val_set = tf.data.TFRecordDataset(val_data_list)
    val_set = val_set.map(
        lambda x: _parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    val_set = val_set.repeat()
    val_set = val_set.batch(FLAGS.batch_size)
    val_iter = val_set.make_one_shot_iterator()
    val_data = val_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config) as sess:
        # with tf.Session() as sess:
        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_mlp,
            'decoder': decoder_mlp,
            'is_res': False
        }
        VAE = Variational_Autoencoder(**kwargs)

        kwargs_2 = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': 8,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_mlp2,
            'decoder': decoder_mlp_tanh,
            'is_res': True,
            'is_constraints': False,
            # 'keep_prob': 0.5
        }

        VAE_2 = Variational_Autoencoder(**kwargs_2)
        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train'), sess.graph)
        writer_val = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'val'))
        writer_rec = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'rec'))
        writer_kl = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'kl'))

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        # use pre trained model
        # ckpt_state = tf.train.get_checkpoint_state(FLAGS.model)
        #
        # if ckpt_state:
        #     restore_model = ckpt_state.model_checkpoint_path
        #     # VAE.restore_model(FLAGS.model+'model_{}'.format(FLAGS.itr))
        VAE.restore_model(FLAGS.model)
        if FLAGS.is_n1_opt:
            VAE_2.restore_model(FLAGS.model2)

        # training
        tbar = tqdm(range(FLAGS.num_iteration), ascii=True)
        for i in tbar:
            train_data_batch = sess.run(train_data)
            if FLAGS.is_n1_opt:
                VAE.update(train_data_batch)

            output1 = VAE.reconstruction_image(train_data_batch)

            train_loss, rec_loss, kl_loss = VAE_2.update2(
                train_data_batch, output1)

            if i % FLAGS.save_loss_step == 0:
                s = "Loss: {:.4f}, rec_loss: {:.4f}, kl_loss: {:.4f}".format(
                    train_loss, rec_loss, kl_loss)
                tbar.set_description(s)
                summary_train_loss = sess.run(merge_op,
                                              {value_loss: train_loss})
                writer_train.add_summary(summary_train_loss, i)

                summary_rec_loss = sess.run(merge_op, {value_loss: rec_loss})
                summary_kl_loss = sess.run(merge_op, {value_loss: kl_loss})
                writer_rec.add_summary(summary_rec_loss, i)
                writer_kl.add_summary(summary_kl_loss, i)

            if i % FLAGS.save_model_step == 0:
                # save model
                VAE.save_model(i)
                VAE_2.save_model2(i)

                # validation
                val_loss = 0.
                for j in range(val_step):
                    val_data_batch = sess.run(val_data)
                    val_data_batch_output1 = VAE.reconstruction_image(
                        val_data_batch)

                    val_loss += VAE_2.validation2(val_data_batch,
                                                  val_data_batch_output1)
                val_loss /= val_step

                summary_val = sess.run(merge_op, {value_loss: val_loss})
                writer_val.add_summary(summary_val, i)
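
The `_parse_function` used in the map calls above is defined elsewhere in the project. A minimal sketch of what it might look like, assuming each TFRecord example stores the flattened volume as a fixed-length float list under a hypothetical `img_raw` key:

import numpy as np
import tensorflow as tf

def _parse_function(example_proto, image_size=(9 * 9 * 9,)):
    # assumed TFRecord layout: one fixed-length float vector per example
    feature_description = {
        'img_raw': tf.FixedLenFeature([int(np.prod(image_size))], tf.float32)
    }
    parsed = tf.parse_single_example(example_proto, feature_description)
    return tf.reshape(parsed['img_raw'], image_size)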
Example 2
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard')):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not os.path.exists(os.path.join(FLAGS.outdir, 'model')):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # save flag file
    FLAGS.flags_into_string()
    FLAGS.append_flags_into_file(os.path.join(FLAGS.outdir, 'flagfile.txt'))

    # get tfrecord list
    train_data_list = glob.glob(FLAGS.indir + '/*')
    # shuffle list
    random.shuffle(train_data_list)

    # load train data
    train_set = tf.data.Dataset.list_files(train_data_list)
    train_set = train_set.apply(
        tf.data.experimental.parallel_interleave(
            lambda x: tf.data.TFRecordDataset(x), cycle_length=os.cpu_count()))
    train_set = train_set.map(
        lambda x: utils._parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    train_set = train_set.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
    train_set = train_set.repeat()
    train_set = train_set.batch(FLAGS.batch_size)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'k_size': FLAGS.k_size,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'lr': FLAGS.lr,
            'is_training': True
        }

        Model = conditional_alphaGAN(**kwargs)

        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_e_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'e_loss'), sess.graph)
        writer_g_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'g_loss'))
        writer_d_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'd_loss'))
        writer_c_loss = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'c_loss'))

        # saving loss operation
        value_loss = tf.Variable(0.0)
        tf.summary.scalar("loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        step_e, step_g, step_d, step_c = [], [], [], []

        # training
        tbar = tqdm(range(FLAGS.num_iteration), ascii=True)
        for step in tbar:
            for i in range(FLAGS.e_g_step):
                train_image_batch, points_batch = sess.run(train_data)
                noise = np.random.normal(
                    0., 1., size=[FLAGS.batch_size, FLAGS.latent_dim])
                e_loss = Model.update_e(train_image_batch, points_batch)
                g_loss = Model.update_g(train_image_batch, points_batch, noise)

            for i in range(FLAGS.d_step):
                d_loss = Model.update_d(train_image_batch, points_batch, noise)

            c_loss = Model.update_c(train_image_batch, points_batch, noise)

            step_e.append(e_loss)
            step_g.append(g_loss)
            step_d.append(d_loss)
            step_c.append(c_loss)

            if step % FLAGS.save_loss_step == 0:
                s = "e_loss: {:.4f}, g_loss: {:.4f}, d_loss: {:.4f}, c_loss: {:.4f}".format(
                    np.mean(step_e), np.mean(step_g), np.mean(step_d),
                    np.mean(step_c))
                tbar.set_description(s)

                summary_e = sess.run(merge_op, {value_loss: np.mean(step_e)})
                summary_g = sess.run(merge_op, {value_loss: np.mean(step_g)})
                summary_d = sess.run(merge_op, {value_loss: np.mean(step_d)})
                summary_c = sess.run(merge_op, {value_loss: np.mean(step_c)})

                writer_e_loss.add_summary(summary_e, step)
                writer_g_loss.add_summary(summary_g, step)
                writer_d_loss.add_summary(summary_d, step)
                writer_c_loss.add_summary(summary_c, step)

                step_e.clear()
                step_g.clear()
                step_d.clear()
                step_c.clear()

            if step % FLAGS.save_model_step == 0:
                # save model
                Model.save_model(FLAGS.outdir, step)
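
`utils.config(index=FLAGS.gpu_index)` is not shown here. A plausible sketch, assuming it only pins the session to one GPU and enables on-demand memory allocation:

import tensorflow as tf

def config(index="0"):
    # restrict the session to a single GPU and grow memory as needed
    gpu_options = tf.GPUOptions(visible_device_list=index, allow_growth=True)
    return tf.ConfigProto(gpu_options=gpu_options)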
Example 3
def main():

    # tf flag
    flags = tf.flags
    flags.DEFINE_string("data_path", "D:/M1_lecture/lecture/image_analysis/no_normalized_data.csv", "data path")
    flags.DEFINE_string("outdir", "D:/M1_lecture/lecture/image_analysis/no_norm", "outdir path")
    flags.DEFINE_string("gpu_index", "0", "GPU-index")
    flags.DEFINE_integer("num_epoch", 1000, "number of iteration")
    flags.DEFINE_integer("kfold", 10, "number of fold")
    flags.DEFINE_list("input_size", [22], "input vector size")
    flags.DEFINE_bool("cv", True, "if 10 fold CV or not")
    FLAGS = flags.FLAGS

    # load train data
    data = np.loadtxt(FLAGS.data_path, delimiter=",").astype(np.float32)
    feature = data[:, :data.shape[1]-1]
    label = data[:, data.shape[1]-1]

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # Resubstitution Method
        if not FLAGS.cv:
            # set network
            kwargs = {
                'sess': sess,
                'input_size': FLAGS.input_size,
                'learning_rate': 1e-3
            }
            NN = neural_network(**kwargs)

            # print parameters
            utils.cal_parameter()

            # check folder
            if not (os.path.exists(os.path.join(FLAGS.outdir, 'R', 'tensorboard'))):
                os.makedirs(os.path.join(FLAGS.outdir, 'R', 'tensorboard'))
            if not (os.path.exists(os.path.join(FLAGS.outdir, 'R', 'model'))):
                os.makedirs(os.path.join(FLAGS.outdir, 'R', 'model'))
            if not (os.path.exists(os.path.join(FLAGS.outdir, 'R', 'predict'))):
                os.makedirs(os.path.join(FLAGS.outdir, 'R', 'predict'))

            # prepare tensorboard
            writer_train = tf.summary.FileWriter(os.path.join(FLAGS.outdir, 'R', 'tensorboard', 'train'), sess.graph)
            writer_test = tf.summary.FileWriter(os.path.join(FLAGS.outdir, 'R', 'tensorboard', 'test'))
            value_loss = tf.Variable(0.0)
            tf.summary.scalar("loss", value_loss)
            merge_op = tf.summary.merge_all()

            # initialize
            sess.run(init_op)

            # training
            tbar = tqdm(range(FLAGS.num_epoch), ascii=True)
            for i in tbar:
                train_step, train_data = utils.batch_iter(feature, label, batch_size=feature.shape[0], shuffle=True)
                train_data_batch = next(train_data)

                train_loss = NN.update(train_data_batch[0], np.reshape(train_data_batch[1], (train_data_batch[1].shape[0], 1)))
                s = "Loss: {:.4f}".format(np.mean(train_loss))
                tbar.set_description(s)

                summary_train_loss = sess.run(merge_op, {value_loss: np.mean(train_loss)})
                writer_train.add_summary(summary_train_loss, i+1)

                NN.save_model(i+1, outdir=os.path.join(FLAGS.outdir, 'R'))

            # test
            test_loss_min = []
            sess.run(init_op)
            test_step, test_data = utils.batch_iter(feature, label, batch_size=feature.shape[0], shuffle=False)

            tbar = tqdm(range(FLAGS.num_epoch), ascii=True)
            for i in tbar:
                NN.restore_model(os.path.join(FLAGS.outdir, 'R', 'model', 'model_{}'.format(i+1)))
                test_data_batch = next(test_data)
                test_loss, predict = NN.test(test_data_batch[0], np.reshape(test_data_batch[1], (test_data_batch[1].shape[0], 1)))
                s = "Loss: {:.4f}".format(np.mean(test_loss))
                tbar.set_description(s)
                test_loss_min.append(np.mean(test_loss))

                summary_test_loss = sess.run(merge_op, {value_loss: np.mean(test_loss)})
                writer_test.add_summary(summary_test_loss, i+1)

                predict_label = np.zeros((test_data_batch[0].shape[0], 2))
                predict_label[:, 0] = test_data_batch[1]
                predict_label[:, 1] = np.where(predict > 0.5, 1, 0)[:, 0]
                np.savetxt(os.path.join(FLAGS.outdir, 'R', 'predict', 'result_{}.csv'.format(i + 1)),
                           predict_label.astype(int), delimiter=',', fmt="%d")

            test_loss_min = np.argmin(np.asarray(test_loss_min))
            np.savetxt(os.path.join(FLAGS.outdir, 'R', 'min_loss_index.txt'), [test_loss_min+1], fmt="%d")

        # 10-fold cross-validation
        if FLAGS.cv:
            kfold = StratifiedKFold(n_splits=FLAGS.kfold, shuffle=True, random_state=1)
            fold_index = 0

            # set network
            kwargs = {
                'sess': sess,
                'input_size': FLAGS.input_size,
                'learning_rate': 1e-3
            }
            NN = neural_network(**kwargs)

            # print parameters
            utils.cal_parameter()
            for train, test in kfold.split(feature, label):
                fold_index += 1

                # check folder
                if not (os.path.exists(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'tensorboard'))):
                    os.makedirs(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'tensorboard'))
                if not (os.path.exists(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'model'))):
                    os.makedirs(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'model'))
                if not (os.path.exists(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'predict'))):
                    os.makedirs(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'predict'))

                # prepare tensorboard
                writer_train = tf.summary.FileWriter(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'tensorboard', 'train'),
                                                     sess.graph)
                writer_test = tf.summary.FileWriter(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'tensorboard', 'test'))
                value_loss = tf.Variable(0.0)
                tf.summary.scalar("loss", value_loss)
                merge_op = tf.summary.merge_all()

                # initialize
                sess.run(tf.global_variables_initializer())

                # training
                tbar = tqdm(range(FLAGS.num_epoch), ascii=True)
                for i in tbar:
                    train_step, train_data = utils.batch_iter(feature[train], label[train],
                                                              batch_size=feature[train].shape[0], shuffle=True)
                    train_data_batch = next(train_data)

                    train_loss = NN.update(train_data_batch[0],
                                           np.reshape(train_data_batch[1], (train_data_batch[1].shape[0], 1)))
                    s = "Loss: {:.4f}".format(np.mean(train_loss))
                    tbar.set_description(s)

                    summary_train_loss = sess.run(merge_op, {value_loss: np.mean(train_loss)})
                    writer_train.add_summary(summary_train_loss, i + 1)

                    NN.save_model(i + 1, outdir=os.path.join(FLAGS.outdir, 'L', str(fold_index)))

                # test
                sess.run(init_op)
                test_loss_min = []
                test_step, test_data = utils.batch_iter(feature[test], label[test],
                                                        batch_size=feature[test].shape[0], shuffle=False)
                tbar = tqdm(range(FLAGS.num_epoch), ascii=True)
                for i in tbar:
                    NN.restore_model(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'model', 'model_{}'.format(i + 1)))
                    test_data_batch = next(test_data)
                    test_loss, predict = NN.test(test_data_batch[0],
                                                 np.reshape(test_data_batch[1], (test_data_batch[1].shape[0], 1)))
                    s = "Loss: {:.4f}".format(np.mean(test_loss))
                    tbar.set_description(s)
                    test_loss_min.append(np.mean(test_loss))

                    summary_test_loss = sess.run(merge_op, {value_loss: np.mean(test_loss)})
                    writer_test.add_summary(summary_test_loss, i + 1)

                    predict_label = np.zeros((test_data_batch[0].shape[0], 2))
                    predict_label[:, 0] = test_data_batch[1]
                    predict_label[:, 1] = np.where(predict > 0.5, 1, 0)[:, 0]
                    np.savetxt(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'predict', 'result_{}.csv'.format(i + 1)),
                               predict_label.astype(int), delimiter=',', fmt="%d")

                test_loss_min = np.argmin(np.asarray(test_loss_min))
                np.savetxt(os.path.join(FLAGS.outdir, 'L', str(fold_index), 'min_loss_index.txt'), [test_loss_min+1], fmt="%d")
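
`utils.batch_iter` is external to this script. A sketch under the assumption that it returns the steps per epoch together with an endless `(feature_batch, label_batch)` generator, which matches how the loops above call `next()` once per epoch:

import numpy as np

def batch_iter(features, labels, batch_size, shuffle=True):
    # assumed behaviour: return steps-per-epoch and a repeating batch generator
    num_samples = features.shape[0]
    num_steps = int(np.ceil(num_samples / float(batch_size)))

    def generator():
        while True:  # the calling code keeps drawing batches across epochs
            idx = np.random.permutation(num_samples) if shuffle \
                else np.arange(num_samples)
            for step in range(num_steps):
                sel = idx[step * batch_size:(step + 1) * batch_size]
                yield features[sel], labels[sel]

    return num_steps, generator()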
Example 4
def main():

    # tf flag
    flags = tf.flags
    flags.DEFINE_string(
        "val_data_txt", 'F:/data_info/VAE_liver/set_5/TFrecord/fold_1/val.txt',
        "validation data txt")
    flags.DEFINE_string(
        "model_dir",
        'G:/experiment_result/liver/VAE/set_5/down/64/alpha_0.1/fold_1/beta_10/model',
        "dir of model")
    flags.DEFINE_string(
        "outdir",
        'G:/experiment_result/liver/VAE/set_5/down/64/alpha_0.1/fold_1/beta_10',
        "outdir")
    flags.DEFINE_string("gpu_index", "0", "GPU-index")
    flags.DEFINE_float("beta", 1, "hyperparameter beta")
    flags.DEFINE_integer("num_of_val", 76, "number of validation data")
    flags.DEFINE_integer("train_iteration", 12001,
                         "number of training iteration")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer(
        "num_per_val", 150,
        "iterations per validation (equals the model-saving step)")
    flags.DEFINE_integer("latent_dim", 4, "latent dim")
    flags.DEFINE_list("image_size", [56, 72, 88, 1], "image size")
    FLAGS = flags.FLAGS

    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))

    # read list
    val_data_list = io.load_list(FLAGS.val_data_txt)

    # number of model
    num_of_model = FLAGS.train_iteration // FLAGS.num_per_val
    if FLAGS.train_iteration % FLAGS.num_per_val != 0:
        num_of_model += 1
    if FLAGS.train_iteration % FLAGS.num_per_val == 0:
        num_of_model -= 1

    # val_iter
    num_val_iter = FLAGS.num_of_val // FLAGS.batch_size
    if FLAGS.num_of_val % FLAGS.batch_size != 0:
        num_val_iter += 1

    # load validation data
    val_set = tf.data.TFRecordDataset(val_data_list, compression_type='GZIP')
    val_set = val_set.map(
        lambda x: utils._parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    val_set = val_set.repeat()
    val_set = val_set.batch(FLAGS.batch_size)
    val_iter = val_set.make_one_shot_iterator()
    val_data = val_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_resblock_bn,
            'decoder': decoder_resblock_bn,
            'downsampling': down_sampling,
            'upsampling': up_sampling,
            'is_training': False,
            'is_down': False
        }
        VAE = Variational_Autoencoder(**kwargs)

        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_val = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'val'))
        writer_val_rec = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'val_rec'))
        writer_val_kl = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'val_kl'))

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        # # validation
        tbar = tqdm(range(num_of_model), ascii=True)
        for i in tbar:
            VAE.restore_model(FLAGS.model_dir +
                              '/model_{}'.format(i * FLAGS.num_per_val))

            val_loss_all = []
            val_rec_all = []
            val_kl_all = []
            for j in range(num_val_iter):
                val_data_batch = sess.run(val_data)
                val_loss, val_rec, val_kl = VAE.validation(val_data_batch)
                val_loss_all.append(val_loss)
                val_rec_all.append(val_rec)
                val_kl_all.append(val_kl)
            val_loss, val_rec, val_kl = np.mean(val_loss_all), np.mean(
                val_rec_all), np.mean(val_kl_all)
            s = "val: {:.4f}, val_rec: {:.4f}, val_kl: {:.4f} ".format(
                val_loss, val_rec, val_kl)
            tbar.set_description(s)

            summary_val = sess.run(merge_op, {value_loss: val_loss})
            summary_val_rec = sess.run(merge_op, {value_loss: val_rec})
            summary_val_kl = sess.run(merge_op, {value_loss: val_kl})
            writer_val.add_summary(summary_val, i * FLAGS.num_per_val)
            writer_val_rec.add_summary(summary_val_rec, i * FLAGS.num_per_val)
            writer_val_kl.add_summary(summary_val_kl, i * FLAGS.num_per_val)
            val_loss_all.clear()
            val_rec_all.clear()
            val_kl_all.clear()
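
`utils.cal_parameter()` only reports the model size. A typical sketch, assuming it sums the shapes of all trainable variables in the default graph:

import numpy as np
import tensorflow as tf

def cal_parameter():
    # count and print the number of trainable parameters
    total = int(sum(np.prod(v.get_shape().as_list())
                    for v in tf.trainable_variables()))
    print("number of trainable parameters: {}".format(total))
    return total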
Example 5
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'model'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # load train data (cifar10, 10 classes)
    if FLAGS.problem == 'cifar10':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar10.load_data()
        num_class = 10
    # load train data (cifar100, 100 classes)
    if FLAGS.problem == 'cifar100':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar100.load_data(
                                 label_mode='fine')
        num_class = 100

    # preprocess
    train_gen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255,
        horizontal_flip=True,
        width_shift_range=4.0 / 32.0,
        height_shift_range=4.0 / 32.0)
    test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 /
                                                               255)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        if FLAGS.is_octconv:
            network = octconv_resnet50
        else:
            network = normal_resnet50

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'input_size': FLAGS.image_size,
            'alpha': FLAGS.alpha,
            'network': network,
            'num_class': num_class,
            'is_training': True,
            'learning_rate': 1e-3
        }

        Model = resnet_model(**kwargs)

        utils.cal_parameter()

        # prepare tensorboard
        writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train'), sess.graph)

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("train_loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        tbar = tqdm(range(FLAGS.epoch), ascii=True)
        epoch_loss = []
        for i in tbar:
            train_data_shuffled = train_gen.flow(x_train,
                                                 y_train,
                                                 FLAGS.batch_size,
                                                 shuffle=True)

            # one epoch
            for step in range(x_train.shape[0] // FLAGS.batch_size):
                train_data_batch = next(train_data_shuffled)

                label = tf.keras.utils.to_categorical(train_data_batch[1],
                                                      num_classes=num_class)

                # training
                train_loss = Model.update(train_data_batch[0], label)
                epoch_loss.append(np.mean(train_loss))

                s = "epoch:{}, step:{}, Loss: {:.4f}".format(
                    i, iter, np.mean(epoch_loss))
                tbar.set_description(s)

            summary_train_loss = sess.run(merge_op,
                                          {value_loss: np.mean(epoch_loss)})
            writer_train.add_summary(summary_train_loss, i)

            epoch_loss.clear()

            # save model
            Model.save_model(i)
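
`Model.save_model(i)` writes one checkpoint per epoch, and the test script in Example 8 restores them as `model_{epoch}`. A minimal sketch of such a save/restore pair, assuming the model class holds a `tf.train.Saver` (the class name and attributes here are hypothetical):

import os
import tensorflow as tf

class CheckpointMixin(object):
    # hypothetical helper illustrating the assumed checkpoint naming convention
    def __init__(self, sess, outdir):
        self.sess = sess
        self.outdir = outdir
        self.saver = tf.train.Saver(max_to_keep=None)

    def save_model(self, step):
        self.saver.save(self.sess,
                        os.path.join(self.outdir, 'model', 'model_{}'.format(step)))

    def restore_model(self, path):
        self.saver.restore(self.sess, path)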
Example 6
def main():
    parser = argparse.ArgumentParser(description='py, train_data_txt, val_data_txt, outdir')

    parser.add_argument('--train_data_txt', '-i1', default='', help='train data txt')

    parser.add_argument('--val_data_txt', '-i2', default='', help='validation data txt')

    parser.add_argument('--outdir', '-i3', default='./beta_0.1', help='outdir')

    args = parser.parse_args()

    # check folder
    if not (os.path.exists(os.path.join(args.outdir, 'tensorboard', 'train'))):
        os.makedirs(os.path.join(args.outdir, 'tensorboard', 'train'))
    if not (os.path.exists(os.path.join(args.outdir, 'tensorboard', 'val'))):
        os.makedirs(os.path.join(args.outdir, 'tensorboard', 'val'))
    if not (os.path.exists(os.path.join(args.outdir, 'tensorboard', 'rec'))):
        os.makedirs(os.path.join(args.outdir, 'tensorboard', 'rec'))
    if not (os.path.exists(os.path.join(args.outdir, 'tensorboard', 'kl'))):
        os.makedirs(os.path.join(args.outdir, 'tensorboard', 'kl'))
    if not (os.path.exists(os.path.join(args.outdir, 'model'))):
        os.makedirs(os.path.join(args.outdir, 'model'))

    # tf flag
    flags = tf.flags
    flags.DEFINE_float("beta", 0.1, "hyperparameter beta")
    flags.DEFINE_integer("num_of_val", 1000, "number of validation data")
    flags.DEFINE_integer("batch_size", 30, "batch size")
    flags.DEFINE_integer("num_iteration", 50001, "number of iteration")
    flags.DEFINE_integer("save_loss_step", 50, "step of save loss")
    flags.DEFINE_integer("save_model_step", 500, "step of save model and validation")
    flags.DEFINE_integer("shuffle_buffer_size", 10000, "buffer size of shuffle")
    flags.DEFINE_integer("latent_dim", 2, "latent dim")
    flags.DEFINE_list("image_size", [512, 512, 1], "image size")
    FLAGS = flags.FLAGS

    # read list
    train_data_list = io.load_list(args.train_data_txt)
    val_data_list = io.load_list(args.val_data_txt)

    # shuffle list
    random.shuffle(train_data_list)
    # val step
    val_step = FLAGS.num_of_val // FLAGS.batch_size
    if FLAGS.num_of_val % FLAGS.batch_size != 0:
        val_step += 1

    # load train data and validation data
    train_set = tf.data.TFRecordDataset(train_data_list)
    train_set = train_set.map(lambda x: _parse_function(x, image_size=FLAGS.image_size),
                              num_parallel_calls=os.cpu_count())
    train_set = train_set.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
    train_set = train_set.repeat()
    train_set = train_set.batch(FLAGS.batch_size)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    val_set = tf.data.TFRecordDataset(val_data_list)
    val_set = val_set.map(lambda x: _parse_function(x, image_size=FLAGS.image_size),
                          num_parallel_calls=os.cpu_count())
    val_set = val_set.repeat()
    val_set = val_set.batch(FLAGS.batch_size)
    val_iter = val_set.make_one_shot_iterator()
    val_data = val_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'outdir': args.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': cnn_encoder,
            'decoder': cnn_decoder
        }
        VAE = Variational_Autoencoder(**kwargs)

        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_train = tf.summary.FileWriter(os.path.join(args.outdir, 'tensorboard', 'train'), sess.graph)
        writer_val = tf.summary.FileWriter(os.path.join(args.outdir, 'tensorboard', 'val'))
        writer_rec = tf.summary.FileWriter(os.path.join(args.outdir, 'tensorboard', 'rec'))
        writer_kl = tf.summary.FileWriter(os.path.join(args.outdir, 'tensorboard', 'kl'))

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        # training
        tbar = tqdm(range(FLAGS.num_iteration), ascii=True)
        for i in tbar:
            train_data_batch = sess.run(train_data)
            train_loss, rec_loss, kl_loss = VAE.update(train_data_batch)

            if i % FLAGS.save_loss_step == 0:
                s = "Loss: {:.4f}, rec_loss: {:.4f}, kl_loss: {:.4f}".format(train_loss, rec_loss, kl_loss)
                tbar.set_description(s)
                summary_train_loss = sess.run(merge_op, {value_loss: train_loss})
                writer_train.add_summary(summary_train_loss, i)

                summary_rec_loss = sess.run(merge_op, {value_loss: rec_loss})
                summary_kl_loss = sess.run(merge_op, {value_loss: kl_loss})
                writer_rec.add_summary(summary_rec_loss, i)
                writer_kl.add_summary(summary_kl_loss, i)


            if i % FLAGS.save_model_step == 0:
                # save model
                VAE.save_model(i)

                # validation
                val_loss = 0.
                for j in range(val_step):
                    val_data_batch = sess.run(val_data)
                    val_loss += VAE.validation(val_data_batch)
                val_loss /= val_step

                summary_val = sess.run(merge_op, {value_loss: val_loss})
                writer_val.add_summary(summary_val, i)
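
`VAE.update` returns the total, reconstruction, and KL losses separately. A hedged sketch of the objective a beta-VAE of this kind typically optimizes, with `beta` from the flags weighting the KL term (`z_mean` and `z_log_var` are assumed encoder outputs, not names taken from this project):

import tensorflow as tf

def beta_vae_loss(x, x_rec, z_mean, z_log_var, beta=0.1):
    # per-image squared reconstruction error plus beta-weighted KL divergence
    rec_loss = tf.reduce_mean(
        tf.reduce_sum(tf.square(x - x_rec), axis=[1, 2, 3]))
    kl_loss = tf.reduce_mean(
        -0.5 * tf.reduce_sum(
            1. + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), axis=1))
    return rec_loss + beta * kl_loss, rec_loss, kl_loss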
Example 7
def main():

    # tf flag
    flags = tf.flags
    flags.DEFINE_string(
        "train_data_txt",
        'F:/data_info/VAE_liver/set_5/TFrecord/fold_1/train.txt',
        "train data txt")
    flags.DEFINE_string(
        "outdir",
        'G:/experiment_result/liver/VAE/set_5/down/64/alpha_0.1/fold_1/beta_1',
        "outdir")
    flags.DEFINE_string("gpu_index", "0", "GPU-index")
    flags.DEFINE_float("beta", 1, "hyperparameter beta")
    flags.DEFINE_integer("batch_size", 12, "batch size")
    flags.DEFINE_integer("num_iteration", 12001, "number of iteration")
    flags.DEFINE_integer("save_loss_step", 150, "step of save loss")
    flags.DEFINE_integer("save_model_step", 150,
                         "step of save model and validation")
    flags.DEFINE_integer("shuffle_buffer_size", 200, "buffer size of shuffle")
    flags.DEFINE_integer("latent_dim", 4, "latent dim")
    flags.DEFINE_list("image_size", [56, 72, 88, 1], "image size")
    FLAGS = flags.FLAGS

    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'model'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # read list
    train_data_list = io.load_list(FLAGS.train_data_txt)
    # shuffle list
    random.shuffle(train_data_list)

    # load train data
    train_set = tf.data.Dataset.list_files(train_data_list)
    train_set = train_set.apply(
        tf.contrib.data.parallel_interleave(
            lambda x: tf.data.TFRecordDataset(x, compression_type='GZIP'),
            cycle_length=os.cpu_count()))
    train_set = train_set.map(
        lambda x: utils._parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    # train_set = train_set.cache()
    train_set = train_set.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
    train_set = train_set.repeat()
    train_set = train_set.batch(FLAGS.batch_size)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # # set network

        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_resblock_bn,
            'decoder': decoder_resblock_bn,
            'downsampling': down_sampling,
            'upsampling': up_sampling,
            'learning_rate': 1e-4,
            'is_training': True,
            'is_down': False
        }
        VAE = Variational_Autoencoder(**kwargs)

        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train'), sess.graph)
        writer_rec = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train_rec'))
        writer_kl = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train_kl'))

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        # # training
        tbar = tqdm(range(FLAGS.num_iteration), ascii=True)
        epoch_train_loss = []
        epoch_kl_loss = []
        epoch_rec_loss = []
        for i in tbar:
            train_data_batch = sess.run(train_data)

            train_loss, rec_loss, kl_loss = VAE.update(train_data_batch)
            epoch_train_loss.append(train_loss)
            epoch_kl_loss.append(kl_loss)
            epoch_rec_loss.append(rec_loss)

            if i % FLAGS.save_loss_step == 0:
                s = "Loss: {:.4f}, kl_loss: {:.4f}, rec_loss: {:.4f}"\
                    .format(np.mean(epoch_train_loss), np.mean(epoch_kl_loss), np.mean(epoch_rec_loss))
                tbar.set_description(s)

                summary_train_loss = sess.run(
                    merge_op, {value_loss: np.mean(epoch_train_loss)})
                summary_rec_loss = sess.run(
                    merge_op, {value_loss: np.mean(epoch_rec_loss)})
                summary_kl_loss = sess.run(
                    merge_op, {value_loss: np.mean(epoch_kl_loss)})
                writer_train.add_summary(summary_train_loss, i)
                writer_rec.add_summary(summary_rec_loss, i)
                writer_kl.add_summary(summary_kl_loss, i)

                epoch_train_loss.clear()
                epoch_kl_loss.clear()
                epoch_rec_loss.clear()

            if i % FLAGS.save_model_step == 0:
                # save model
                VAE.save_model(i)
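
`io.load_list` turns the text file of TFRecord paths into a Python list. A sketch, assuming one path per line:

def load_list(path):
    # read one file path per line, dropping blank lines and trailing whitespace
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]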
Example 8
def main(argv):

    # turn off log message
    tf.logging.set_verbosity(tf.logging.WARN)

    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))

    # load train data (cifar10, 10 classes)
    if FLAGS.problem == 'cifar10':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar10.load_data()
        num_class = 10
    # load train data (cifar100, 100 classes)
    if FLAGS.problem == 'cifar100':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar100.load_data(
                                 label_mode='fine')
        num_class = 100

    # preprocess
    test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 /
                                                               255)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        if FLAGS.is_octconv:
            network = octconv_resnet50
        else:
            network = normal_resnet50

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'input_size': FLAGS.image_size,
            'alpha': FLAGS.alpha,
            'network': network,
            'num_class': num_class,
            'is_training': False,
            'learning_rate': 1e-4
        }

        Model = resnet_model(**kwargs)

        utils.cal_parameter()

        # prepare tensorboard
        writer_test = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'test'))

        value_acc = tf.Variable(0.0)
        tf.summary.scalar("test_accuracy", value_acc)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        tbar = tqdm(range(FLAGS.epoch), ascii=True)
        epoch_acc = []
        for i in tbar:
            test_data = test_gen.flow(x_test,
                                      y_test,
                                      FLAGS.batch_size,
                                      shuffle=False)

            # one epoch
            Model.restore_model(FLAGS.model_path + '/model_{}'.format(i))
            for step in range(x_test.shape[0] // FLAGS.batch_size):
                test_data_batch = next(test_data)

                label = tf.keras.utils.to_categorical(test_data_batch[1],
                                                      num_classes=num_class)

                test_acc = Model.test(test_data_batch[0], label)
                epoch_acc.append(np.mean(test_acc))

                s = "epoch:{}, acc: {:.4f}".format(i, np.mean(epoch_acc))
                tbar.set_description(s)

            summary_test_acc = sess.run(merge_op,
                                        {value_acc: np.mean(epoch_acc)})
            writer_test.add_summary(summary_test_acc, i)

            epoch_acc.clear()
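
`Model.test` returns a per-batch classification accuracy. A plausible sketch of that metric, assuming the model exposes `logits` and one-hot `labels` tensors (both names are assumptions):

import tensorflow as tf

def batch_accuracy(logits, labels):
    # fraction of samples whose predicted class matches the one-hot label
    correct = tf.equal(tf.argmax(logits, axis=1), tf.argmax(labels, axis=1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))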
Example 9
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model dirctory is not existed!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'dice')):
        os.makedirs(os.path.join(FLAGS.dir, 'dice'))

    # get ground truth list
    ground_truth_list = io.load_list(FLAGS.ground_truth)

    # load ground truth
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    test_set = test_set.repeat()
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        dice_list = []

        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))
        for i in range(FLAGS.num_of_test):
            _, test_points_batch, _ = sess.run(test_data)

            np.random.seed(4)

            tbar = tqdm(range(FLAGS.num_of_generate // FLAGS.batch_size),
                        ascii=True)
            for j in tbar:

                z = np.random.normal(0.,
                                     1.,
                                     size=[FLAGS.batch_size, FLAGS.latent_dim])
                # z = utils.truncated_noise_sample(FLAGS.batch_size, FLAGS.latent_dim, truncation=2.0)
                generate_batch = Model.generate_sample(z, test_points_batch)

                # save logodds
                generate_batch_ = np.asarray(generate_batch)
                generate_batch_ = generate_batch_[0, :, :, :]
                for image_index in range(generate_batch_.shape[0]):
                    gen = generate_batch_[image_index][:, :, :, 0]
                    io.write_mhd_and_raw(
                        gen,
                        '{}.mhd'.format(
                            os.path.join(
                                FLAGS.dir, 'dice', '{}'.format(i),
                                '{}'.format(j * FLAGS.batch_size +
                                            image_index))),
                        spacing=[1, 1, 1],
                        origin=[0, 0, 0],
                        compress=True)

                if j == 0:
                    data = np.asarray(generate_batch)[0]
                    label = np.where(data > 0.5, 0, 1)
                    label = label.astype(np.int8)
                    pa = np.sum(label, axis=0)
                else:
                    data = np.asarray(generate_batch)[0]
                    label_ = np.where(data > 0.5, 0, 1)
                    label_ = label_.astype(np.int8)
                    pa = pa + np.sum(label_, axis=0)

            pa = pa / float(FLAGS.num_of_generate)
            pa = pa.astype(np.float32)

            # output image
            io.write_mhd_and_raw(pa,
                                 '{}_{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'dice', 'PA'), i),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)

            # dice
            gt = ground_truth[i]
            gt = gt.astype(np.float32)
            dice = utils.dice_coef(gt, pa)
            dice_list.append([round(dice, 6)])
            print(dice)

        print('dice = %f' % np.mean(dice_list))
        # write csv
        io.write_csv(
            dice_list,
            os.path.join(FLAGS.dir, 'dice',
                         'dice_{}.csv'.format(FLAGS.model_index)), 'dice')
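
`utils.dice_coef(gt, pa)` compares the binary ground truth with the probabilistic atlas `pa`. A sketch of a soft Dice coefficient, one common choice for this comparison (the project's exact definition is not shown):

import numpy as np

def dice_coef(gt, pa, eps=1e-7):
    # soft Dice: 2 * |gt * pa| / (|gt| + |pa|)
    intersection = np.sum(gt * pa)
    return 2. * intersection / (np.sum(gt) + np.sum(pa) + eps)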
Example 10
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model dirctory is not existed!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'generalization')):
        os.makedirs(os.path.join(FLAGS.dir, 'generalization'))

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # test step
    test_step = FLAGS.num_of_test // FLAGS.batch_size
    if FLAGS.num_of_test % FLAGS.batch_size != 0:
        test_step += 1

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))
        tbar = tqdm(range(test_step), ascii=True)
        for i in tbar:
            test_image_batch, test_points_batch, test_label_batch = sess.run(
                test_data)
            reconstruction_batch = Model.reconstruction(
                test_image_batch, test_points_batch)

            # dilation of points
            test_points_batch = tf.keras.layers.MaxPooling3D(
                pool_size=5, strides=1, padding='same')(test_points_batch)
            test_points_batch = test_points_batch.eval()
            test_points_batch = test_points_batch * 2  # scaling

            if i == 0:
                test_label = np.asarray(test_label_batch)
                reconstruction = np.asarray(reconstruction_batch)[0]
                points = np.asarray(test_points_batch)
            else:
                test_label = np.concatenate(
                    (test_label, np.asarray(test_label_batch)), axis=0)
                reconstruction = np.concatenate(
                    (reconstruction, np.asarray(reconstruction_batch)[0]),
                    axis=0)
                points = np.concatenate((points, np.array(test_points_batch)),
                                        axis=0)

        # calculate Jaccard Index and output images
        generalization = []
        tbar = tqdm(range(reconstruction.shape[0]), ascii=True)
        for i in tbar:
            test_label_single = test_label[i][:, :, :, 0]
            reconstruction_single = reconstruction[i][:, :, :, 0]
            points_single = points[i][:, :, :, 0]

            # label
            rec_label = np.where(reconstruction_single > 0.5, 0, 1)
            rec_label = rec_label.astype(np.int8)

            # calculate ji
            generalization.append(
                [utils.jaccard(rec_label, test_label_single)])

            # label and points
            label_and_points = rec_label + points_single

            rec_label = rec_label.astype(np.int8)
            label_and_points = label_and_points.astype(np.int8)

            # output image
            io.write_mhd_and_raw(reconstruction_single,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'logodds',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(rec_label,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'predict',
                                                  'recon_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(label_and_points,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'label_and_points',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)

        print('generalization = %f' % np.mean(generalization))

        # write csv
        io.write_csv(
            generalization,
            os.path.join(FLAGS.dir, 'generalization',
                         'generalization_val_{}.csv'.format(
                             FLAGS.model_index)), 'generalization')
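
`utils.jaccard` scores each reconstructed label against its ground truth. A sketch of the Jaccard index (intersection over union) on binary volumes, assuming both inputs are 0/1 arrays:

import numpy as np

def jaccard(a, b):
    # Jaccard index: |A AND B| / |A OR B| on binary masks
    a = a.astype(bool)
    b = b.astype(bool)
    union = np.logical_or(a, b).sum()
    if union == 0:
        return 1.0
    return np.logical_and(a, b).sum() / float(union)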
Example 11
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model dirctory is not existed!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'specificity')):
        os.makedirs(os.path.join(FLAGS.dir, 'specificity'))

    # load ground truth
    ground_truth_list = glob.glob(FLAGS.ground_truth + '/*.mhd')
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    # test_set = test_set.shuffle(buffer_size=FLAGS.num_of_test)
    test_set = test_set.repeat()
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))

        tbar = tqdm(range(FLAGS.num_of_generate // FLAGS.batch_size),
                    ascii=True)
        for i in tbar:
            np.random.seed(4)

            z = np.random.normal(0.,
                                 1.,
                                 size=[FLAGS.batch_size, FLAGS.latent_dim])

            _, test_points_batch, _ = sess.run(test_data)
            generate_batch = Model.generate_sample(z, test_points_batch)

            # dilation of points
            test_points_dilate = tf.keras.layers.MaxPooling3D(
                pool_size=3, strides=1, padding='same')(test_points_batch)
            test_points_dilate = test_points_dilate.eval()
            test_points_dilate = test_points_dilate * 2  # scaling

            if i == 0:
                samples = np.asarray(generate_batch)[0]
                points = np.asarray(test_points_dilate)
            else:
                samples = np.concatenate(
                    (samples, np.asarray(generate_batch)[0]), axis=0)
                points = np.concatenate(
                    (points, np.asarray(test_points_dilate)), axis=0)

        # calculate Jaccard Index and output images
        specificity = []
        tbar = tqdm(range(samples.shape[0]), ascii=True)
        for i in tbar:
            gen = samples[i][:, :, :, 0]
            points_single = points[i][:, :, :, 0]

            # label
            gen_label = np.where(gen > 0.5, 0, 1)

            # calculate ji
            case_max_ji = 0.
            for image_index in range(ground_truth.shape[0]):
                ji = utils.jaccard(gen_label, ground_truth[image_index])
                if ji > case_max_ji:
                    case_max_ji = ji
            specificity.append([case_max_ji])

            # label and points
            label_and_points = gen_label + points_single

            gen_label = gen_label.astype(np.int8)
            label_and_points = label_and_points.astype(np.int8)

            # output image
            io.write_mhd_and_raw(gen,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'logodds',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(gen_label,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'label',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(label_and_points,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'label_and_points',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
        print('specificity = %f' % np.mean(specificity))

        # write csv
        io.write_csv(
            specificity,
            os.path.join(FLAGS.dir, 'specificity',
                         'specificity_{}.csv'.format(FLAGS.model_index)),
            'specificity')
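
The specificity score above is the best Jaccard index of each generated label against any ground-truth case; utils.jaccard itself is not shown in this listing. A minimal sketch, assuming both inputs are binary label volumes:

# NOTE: hypothetical sketch of utils.jaccard; the project's actual helper may differ
import numpy as np

def jaccard(label1, label2):
    """Jaccard index (intersection over union) of two binary label volumes."""
    a = np.asarray(label1).astype(bool)
    b = np.asarray(label2).astype(bool)
    union = np.logical_or(a, b).sum()
    if union == 0:
        return 0.0
    return np.logical_and(a, b).sum() / union
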
Example n. 12
def main(argv):
    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'model'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # get file list
    train_data_list = glob.glob(FLAGS.indir + '/*')
    # shuffle list
    random.shuffle(train_data_list)

    # load train data
    train_set = tf.data.Dataset.list_files(train_data_list)
    train_set = train_set.apply(
        tf.contrib.data.parallel_interleave(
            lambda x: tf.data.TFRecordDataset(x), cycle_length=os.cpu_count()))
    train_set = train_set.map(utils._parse_function,
                              num_parallel_calls=os.cpu_count())
    train_set = train_set.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
    train_set = train_set.repeat()
    train_set = train_set.batch(FLAGS.batch_size)
    train_set = train_set.prefetch(1)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    # steps per epoch (integer division so range() receives an int)
    if FLAGS.num_data % FLAGS.batch_size == 0:
        step_of_epoch = FLAGS.num_data // FLAGS.batch_size
    else:
        step_of_epoch = FLAGS.num_data // FLAGS.batch_size + 1

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        if FLAGS.is_octconv:
            network = octconv_resnet50
        else:
            network = normal_resnet50

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'input_size': FLAGS.image_size,
            'alpha': FLAGS.alpha,
            'network': network,
            'is_training': True,
            'learning_rate': 1e-4
        }

        Model = resnet_model(**kwargs)

        utils.cal_parameter()

        # prepare tensorboard
        writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train'), sess.graph)

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("train_loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        tbar = tqdm(range(FLAGS.epoch), ascii=True)
        epoch_loss = []
        for i in tbar:

            # one epoch
            for step in range(step_of_epoch):
                train_data_batch, train_label_batch = sess.run(train_data)
                train_loss = Model.update(train_data_batch, train_label_batch)
                epoch_loss.append(train_loss)
                s = "epoch:{}, step:{}, Loss: {:.4f}".format(
                    i, step, np.mean(epoch_loss))
                tbar.set_description(s)

            summary_train_loss = sess.run(merge_op,
                                          {value_loss: np.mean(epoch_loss)})
            writer_train.add_summary(summary_train_loss, i)

            epoch_loss.clear()

            # save model
            Model.save_model(i)
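
utils._parse_function, which decodes each TFRecord into an image/label pair, is not shown above. A minimal TF 1.x sketch follows; the feature keys ('image', 'label') and the reshape target are placeholders, since the actual record layout depends on how the data was serialized.

# NOTE: hypothetical sketch of utils._parse_function, keys and shape are assumptions
import tensorflow as tf

def _parse_function(example_proto, image_shape=(32, 32, 32, 1)):
    features = {
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    parsed = tf.parse_single_example(example_proto, features)
    image = tf.decode_raw(parsed['image'], tf.float32)
    image = tf.reshape(image, image_shape)
    label = tf.cast(parsed['label'], tf.int32)
    return image, label
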
Example n. 13
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard')):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not os.path.exists(os.path.join(FLAGS.outdir, 'model')):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    (x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
    x_train = np.expand_dims(x_train, axis=-1)

    # preprocess
    train_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 /
                                                                255.)

    # tensorboard variable
    loss_value = tf.Variable(0.)
    tf.summary.scalar("loss", loss_value)
    merge_op = tf.summary.merge_all()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        # set network
        kwargs = {
            'sess': sess,
            'noise_dim': FLAGS.noise_dim,
            'image_size': FLAGS.image_size,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'is_training': True
        }

        Model = DCGAN(**kwargs)

        utils.cal_parameter()

        # prepare tensorboard
        writer_train_g = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train_g'), sess.graph)
        writer_train_d = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train_d'))

        # initialize
        sess.run(init_op)

        tbar = tqdm(range(FLAGS.epoch), ascii=True)
        epoch_loss_g = []
        epoch_loss_d = []

        for step in tbar:
            train_data_shuffled = train_gen.flow(x_train,
                                                 y=None,
                                                 batch_size=FLAGS.batch_size,
                                                 shuffle=True)

            # one epoch
            for _ in range(x_train.shape[0] // FLAGS.batch_size):
                train_data_batch = next(train_data_shuffled)

                noise = np.random.uniform(
                    -1., 1., size=[FLAGS.batch_size, FLAGS.noise_dim])
                # training
                d_loss = Model.update_d(noise, train_data_batch)
                g_loss = Model.update_g(noise)

                epoch_loss_d.append(d_loss)
                epoch_loss_g.append(g_loss)

            s = "epoch:{}, loss_d:{:.4f}, loss_g:{:.4f}".format(
                step + 1, np.mean(epoch_loss_d), np.mean(epoch_loss_g))
            tbar.set_description(s)

            sum_d_loss = sess.run(merge_op,
                                  {loss_value: np.mean(epoch_loss_d)})
            sum_g_loss = sess.run(merge_op,
                                  {loss_value: np.mean(epoch_loss_g)})

            writer_train_d.add_summary(sum_d_loss, step)
            writer_train_g.add_summary(sum_g_loss, step)

            epoch_loss_d.clear()
            epoch_loss_g.clear()

            # save model
            Model.save_model(FLAGS.outdir, step)
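
Model.update_d and Model.update_g are defined elsewhere in the project; the sketch below shows the standard DCGAN sigmoid cross-entropy objective they presumably minimize. This is an assumption for illustration, since the actual loss code is not part of this listing.

# NOTE: hypothetical DCGAN losses, not the project's actual implementation
import tensorflow as tf

def dcgan_losses(d_real_logits, d_fake_logits):
    bce = tf.nn.sigmoid_cross_entropy_with_logits
    d_loss = tf.reduce_mean(
        bce(labels=tf.ones_like(d_real_logits), logits=d_real_logits)) + tf.reduce_mean(
            bce(labels=tf.zeros_like(d_fake_logits), logits=d_fake_logits))
    g_loss = tf.reduce_mean(
        bce(labels=tf.ones_like(d_fake_logits), logits=d_fake_logits))
    return d_loss, g_loss
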
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model directory does not exist!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'tensorboard')):
        os.makedirs(os.path.join(FLAGS.dir, 'tensorboard'))

    # get tfrecord list
    val_data_list = glob.glob(FLAGS.indir + '/*')

    # get ground truth list
    ground_truth_list = glob.glob(FLAGS.ground_truth + '/*.mhd')

    # load ground truth
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # number of saved models to evaluate
    num_of_model = FLAGS.train_iteration // FLAGS.save_model_step
    if FLAGS.train_iteration % FLAGS.save_model_step != 0:
        num_of_model += 1
    else:
        num_of_model -= 1

    # val_iter
    num_val_iter = FLAGS.num_of_val // FLAGS.batch_size
    if FLAGS.num_of_val % FLAGS.batch_size != 0:
        num_val_iter += 1

    # load val data
    val_set = tf.data.Dataset.list_files(val_data_list)
    val_set = val_set.apply(
        tf.data.experimental.parallel_interleave(
            lambda x: tf.data.TFRecordDataset(x), cycle_length=os.cpu_count()))
    val_set = val_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                          num_parallel_calls=os.cpu_count())
    val_set = val_set.repeat()
    val_set = val_set.batch(FLAGS.batch_size)
    val_iter = val_set.make_one_shot_iterator()
    val_data = val_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        # print parameters
        utils.cal_parameter()

        # prepare tensorboard
        writer_gen = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_generalization'))
        writer_spe = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_specificity'))
        writer_val_ls = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_ls'))
        writer_val_eikonal = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard', 'val_eikonal'))
        writer_all = tf.summary.FileWriter(
            os.path.join(FLAGS.dir, 'tensorboard',
                         'val_all'))  # mean of generalization and specificity

        # saving loss operation
        value_loss = tf.Variable(0.0)
        tf.summary.scalar("evaluation", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        # validation
        tbar = tqdm(range(num_of_model), ascii=True)
        for step in tbar:
            Model.restore_model(
                FLAGS.dir +
                '/model/model_{}'.format(step * FLAGS.save_model_step))

            generalization, specificity, val_ls, val_eikonal = [], [], [], []
            points = []
            real_image = []

            # reconstruction
            for i in range(num_val_iter):
                val_image_batch, val_points_batch, val_label_batch = sess.run(
                    val_data)
                points.append(val_points_batch)
                real_image.append(val_image_batch)

                reconstruction_batch = Model.reconstruction(
                    val_image_batch, val_points_batch)

                if i == 0:
                    val_label = np.asarray(val_label_batch)
                    reconstruction = np.asarray(reconstruction_batch)[0]
                else:
                    val_label = np.concatenate(
                        (val_label, np.asarray(val_label_batch)), axis=0)
                    reconstruction = np.concatenate(
                        (reconstruction, np.asarray(reconstruction_batch)[0]),
                        axis=0)

            # calculate generalization
            for i in range(reconstruction.shape[0]):
                val_label_single = val_label[i][:, :, :, 0]
                reconstruction_single = reconstruction[i][:, :, :, 0]

                # label
                rec_label = np.where(reconstruction_single > 0.5, 0, 1)

                # calculate ji
                generalization.append(
                    [utils.jaccard(rec_label, val_label_single)])

            # samples from latent space
            points_ls = np.ones_like(points) * 0.5
            for i in range(FLAGS.num_of_generate // FLAGS.batch_size):
                shuffle_fun(points_ls, points)

                z = np.random.normal(0.,
                                     1.,
                                     size=[FLAGS.batch_size, FLAGS.latent_dim])
                generate_batch, level_set_loss, eikonal_loss = Model.validation_specificity(
                    points_ls[random.randint(0, num_val_iter - 1)], z,
                    points[random.randint(0, num_val_iter - 1)])

                val_ls.append(level_set_loss)
                val_eikonal.append(eikonal_loss)

                if i == 0:
                    samples = np.asarray(generate_batch)
                else:
                    samples = np.concatenate(
                        (samples, np.asarray(generate_batch)), axis=0)

            # calculate specificity
            for i in range(samples.shape[0]):
                gen = samples[i][:, :, :, 0]

                # label
                gen_label = np.where(gen > 0.5, 0, 1)

                # calculate ji
                case_max_ji = 0.
                for image_index in range(ground_truth.shape[0]):
                    ji = utils.jaccard(gen_label, ground_truth[image_index])
                    if ji > case_max_ji:
                        case_max_ji = ji
                specificity.append([case_max_ji])

            s = "val_generalization: {:.4f}, val_specificity: {:.4f}, ls: {:.4f}, eikonal: {:.4f}, mean: {:.4f}".format(
                np.mean(generalization), np.mean(specificity), np.mean(val_ls),
                np.mean(val_eikonal),
                (np.mean(generalization) + np.mean(specificity)) / 2.)

            tbar.set_description(s)

            summary_gen = sess.run(merge_op,
                                   {value_loss: np.mean(generalization)})
            summary_spe = sess.run(merge_op,
                                   {value_loss: np.mean(specificity)})
            summary_ls = sess.run(merge_op, {value_loss: np.mean(val_ls)})
            summary_eikonal = sess.run(merge_op,
                                       {value_loss: np.mean(val_eikonal)})
            summary_all = sess.run(merge_op, {
                value_loss:
                (np.mean(generalization) + np.mean(specificity)) / 2.
            })

            writer_gen.add_summary(summary_gen, step * FLAGS.save_model_step)
            writer_spe.add_summary(summary_spe, step * FLAGS.save_model_step)
            writer_val_ls.add_summary(summary_ls, step * FLAGS.save_model_step)
            writer_val_eikonal.add_summary(summary_eikonal,
                                           step * FLAGS.save_model_step)
            writer_all.add_summary(summary_all, step * FLAGS.save_model_step)

            generalization.clear()
            specificity.clear()
            val_ls.clear()
            val_eikonal.clear()
            points.clear()
            real_image.clear()
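
The 'val_all' curve logged above is the mean of generalization and specificity per saved model. A hypothetical post-processing step (not part of the script above) that picks the checkpoint maximizing that mean could look like this:

# NOTE: hypothetical helper, assuming the per-checkpoint means were collected in lists
import numpy as np

def select_best_checkpoint(gen_means, spe_means, save_model_step):
    scores = (np.asarray(gen_means) + np.asarray(spe_means)) / 2.0
    best = int(np.argmax(scores))
    return best * save_model_step, float(scores[best])
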