Example #1
import numpy as np

import data_tf
import dataset
import train

# batch_size is a module-level constant in the source project; 64 is assumed
# here, matching the configuration script at the end of this file.
batch_size = 64


def train_normal():
    simple_train_savepath = "/home/oole/tf_normal_save/tfnet_full"

    train_datapath = "/home/oole/Data/training/patient_patches_jpg"
    # train_datapath = '/home/oole/tf_test_data/validation'
    val_datapath = "/home/oole/Data/validation/patient_patches_jpg"

    model_name = "model"

    train_slidelist, train_slide_dimensions, old_disc_patches, _ = data_tf.collect_data(
        train_datapath, batch_size)
    val_slidelist, _, _, _ = data_tf.collect_data(val_datapath, batch_size)

    train_patches = dataset.slidelist_to_patchlist(train_slidelist)
    val_patches = dataset.slidelist_to_patchlist(val_slidelist)
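    # Shuffle the flattened patch lists so batches mix patches from different
    # slides instead of following slide order.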
    np.random.shuffle(train_patches)
    np.random.shuffle(val_patches)

    train_accuracy, val_accuracy = train.train_net(
        train_patches,
        val_patches,
        num_epochs=20,
        batch_size=batch_size,
        savepath=simple_train_savepath,
        do_augment=True,
        model_name=model_name)
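
A minimal driver sketch; the __main__ guard is illustrative and not part of the source:

if __name__ == "__main__":
    # Trains for 20 epochs and writes checkpoints to simple_train_savepath.
    train_normal()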
Example #2
import numpy as np

import data_tf
import dataset
import train_em

# batch_size is a module-level constant in the source project; 64 is assumed
# here, matching the configuration script at the end of this file.
batch_size = 64


def train_augment():
    # simple_train_savepath = "/home/oole/tfnetsave/tfnet_full"
    simple_train_savepath = "/home/oole/tfnetsave/tfnet_em_full"
    em_train_savepath = "/home/oole/tfnetsave/tfnet_em_full"

    initial_epoch = 0

    train_datapath = "/home/oole/Data/training/patient_patches_jpg"
    # train_datapath = '/home/oole/tf_test_data/validation'
    val_datapath = "/home/oole/Data/validation/patient_patches_jpg"

    logfile_path = "/home/oole/tfnetsave/tfnet_em_full_log.csv"
    logreg_savepath = "/home/oole/tfnetsave/tfnet_em_full_logreg"

    model_name = "model"

    label_encoder = data_tf.labelencoder()

    train_slidelist, train_slide_dimensions, old_disc_patches, _ = data_tf.collect_data(
        train_datapath, batch_size)
    val_slidelist, _, _, _ = data_tf.collect_data(val_datapath, batch_size)

    train_patches = dataset.slidelist_to_patchlist(train_slidelist)
    val_patches = dataset.slidelist_to_patchlist(val_slidelist)
    np.random.shuffle(train_patches)
    np.random.shuffle(val_patches)

    # Initial training:
    # train_accuracy, val_accuracy = train.train_net(
    #     train_patches, val_patches, num_epochs=2, batch_size=batch_size,
    #     savepath=simple_train_savepath, do_augment=True, model_name=model_name)
    #
    # util.write_log_file(logfile_path, train_accuracy=train_accuracy,
    #                     val_accuracy=val_accuracy)

    # Test continue training
    # train.train_net(train_patches, val_patches, num_epochs=2, batch_size=batch_size, savepath=simple_train_savepath,
    #                 loadpath=simple_train_savepath, do_augment=False, model_name="model")

    train_em.emtrain(train_datapath,
                     val_datapath,
                     simple_train_savepath,
                     em_train_savepath,
                     label_encoder,
                     batch_size,
                     initial_epochnum=initial_epoch,
                     model_name=model_name,
                     spatial_smoothing=False,
                     do_augment=True,
                     num_epochs=2,
                     dropout_ratio=0.5,
                     learning_rate=0.0005,
                     sanity_check=False,
                     logfile_path=logfile_path,
                     logreg_savepath=logreg_savepath)
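
Resuming a stopped run presumably amounts to raising initial_epochnum and starting from the EM checkpoint instead of the plain one. A hedged sketch: that the third argument is the load path and the fourth the save path is inferred from the call above, and the resume epoch is illustrative:

def resume_em_training(initial_epoch=2):
    em_train_savepath = "/home/oole/tfnetsave/tfnet_em_full"
    train_em.emtrain("/home/oole/Data/training/patient_patches_jpg",
                     "/home/oole/Data/validation/patient_patches_jpg",
                     em_train_savepath,  # assumed load path: previous EM checkpoint
                     em_train_savepath,  # assumed save path for new checkpoints
                     data_tf.labelencoder(),
                     batch_size,
                     initial_epochnum=initial_epoch,
                     model_name="model",
                     spatial_smoothing=False,
                     do_augment=True,
                     num_epochs=2,
                     dropout_ratio=0.5,
                     learning_rate=0.0005,
                     sanity_check=False,
                     logfile_path="/home/oole/tfnetsave/tfnet_em_full_log.csv",
                     logreg_savepath="/home/oole/tfnetsave/tfnet_em_full_logreg")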
Example #3
import tensorflow as tf

import data_tf
import dataset
import netutil
import validate


def test_existing_net(slide_datapath, net_loadpath, model_name, dropout_ratio,
                      batch_size, do_augment=False, shuffle_buffer_size=2000):

    slide_list, slide_dimensions, num_patches, slide_label = data_tf.collect_data(
        slide_datapath, batch_size)

    patches = dataset.slidelist_to_patchlist(slide_list)

    with tf.Session() as sess:
        if do_augment:
            input_dataset = dataset.img_dataset_augment(patches, batch_size=batch_size,
                                                        shuffle_buffer_size=shuffle_buffer_size, shuffle=True)
        else:
            input_dataset = dataset.img_dataset(patches, batch_size=batch_size,
                                                shuffle_buffer_size=shuffle_buffer_size, shuffle=True)

        input_iterator = input_dataset.make_initializable_iterator()

        iterator_handle = sess.run(input_iterator.string_handle())

        # Feedable-iterator pattern: the model reads from a proxy iterator, and
        # the concrete input iterator is selected at run time by feeding its
        # string handle into this placeholder.
        proxy_iterator_handle_ph = tf.placeholder(tf.string, shape=[])
        proxy_iterator = tf.data.Iterator.from_string_handle(proxy_iterator_handle_ph,
                                                             output_types=input_iterator.output_types,
                                                             output_shapes=input_iterator.output_shapes)

        x, y = proxy_iterator.get_next()

        # build_model attaches the network to the iterator tensors; x and y are
        # rebound to the model's input and label tensors.
        train_op, loss_op, y, accuracy_op, x, keep_prob_ph, learning_rate_ph, is_training_ph, y_pred_op, y_argmax_op = \
            netutil.build_model(model_name, x, y, use_bn_1=True, use_bn_2=True, use_dropout_1=True, use_dropout_2=True)

        # Model saver; restore the trained weights from disk.
        saver = tf.train.Saver()
        saver.restore(sess, net_loadpath)

        validate.validate_existing_net(iterator_handle, input_iterator, num_patches,
                                       dropout_ratio, batch_size,
                                       loss_op, accuracy_op,
                                       keep_prob_ph, is_training_ph,
                                       proxy_iterator_handle_ph, sess)



if __name__ == "__main__":
    # Example invocation; the paths refer to the author's environment.
    net_loadpath = "/home/oole/tfnetsave/tfnet_em_full_premod"
    slide_datapath = "/home/oole/Data/training/patient_patches_premod_jpg"

    test_existing_net(slide_datapath, net_loadpath, model_name="model",
                      dropout_ratio=0.5, batch_size=64, do_augment=False,
                      shuffle_buffer_size=2000)
Example #4
import numpy as np

import data_tf
import dataset
import netutil
import util
""" CONFIGURATION """
dropout_ratio = 0.5
number_of_epochs = 5
batch_size = 64
lr = 0.0001

# log directory
log_directory = "/home/oole/tf_log"
""" LOAD DATA """
# training_data_path = 'D:/Data/tf_test_data/validation'
training_data_path = '/home/oole/tf_test_data/validation'
train_slidelist, train_slide_dimensions, train_num_patches, train_slide_label = data_tf.collect_data(
    training_data_path, batch_size)

patches = dataset.slidelist_to_patchlist(train_slidelist)
no_patches = len(patches)
""" CREATE TRAINING AND VALIDATION DATASET """

# A shuffle buffer as large as the whole patch list yields a full uniform shuffle.
train_dataset = dataset.img_dataset(patches,
                                    batch_size,
                                    shuffle=True,
                                    shuffle_buffer_size=no_patches)
train_iterator = train_dataset.make_initializable_iterator()

# NOTE: this test script reuses the training patches as the validation set.
val_dataset = dataset.img_dataset(patches,
                                  batch_size,
                                  shuffle=False,
                                  shuffle_buffer_size=no_patches)
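
The source snippet ends here. A minimal continuation sketch, assuming the same feedable-iterator wiring as Example #3; the session block and loop skeleton below are illustrative, not part of the original:

import tensorflow as tf

val_iterator = val_dataset.make_initializable_iterator()

with tf.Session() as sess:
    train_handle = sess.run(train_iterator.string_handle())
    val_handle = sess.run(val_iterator.string_handle())

    # Proxy iterator: feed train_handle or val_handle to switch input streams.
    handle_ph = tf.placeholder(tf.string, shape=[])
    proxy_iterator = tf.data.Iterator.from_string_handle(
        handle_ph,
        output_types=train_iterator.output_types,
        output_shapes=train_iterator.output_shapes)
    x, y = proxy_iterator.get_next()

    for epoch in range(number_of_epochs):
        sess.run(train_iterator.initializer)
        # ... run the model's training ops here, feeding
        # {handle_ph: train_handle} into each sess.run call ...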