Example #1
def test(object_folder, model_path, filename_list, result_path, flags, igpu):
    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    ###########################################################
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = igpu
    ###########################################################

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)
        # Placeholder for input patches
        images_test = tf.placeholder(tf.float32,
                                     shape=(np.hstack([
                                         flags['test_batch_size'],
                                         flags['size_input_patch']
                                     ])))
        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)
        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = result_path
            routine.create_dir(result_dir)

            for iImage, file in enumerate(filename_list):
                start_time = time.time()
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename):
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)
                    # matlab.save(savename,{'mask':result})
                    KSimage.imwrite(result, savename)
                duration = time.time() - start_time
                print(
                    'Finished segmenting DCIS regions on H&E sample %d of %d (%.2f sec)'
                    % (iImage + 1, len(filename_list), duration))
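A hypothetical invocation of the function above; every path and flag value here is an illustrative assumption, not a value taken from this project. The flag keys are exactly those read inside test():

flags = {
    'gpu': '/gpu:0',                    # device string passed to tf.device
    'gpu_memory_fraction': 0.8,         # fraction of GPU memory to reserve
    'test_batch_size': 32,              # batch dimension of the patch placeholder
    'size_input_patch': [128, 128, 3],  # H x W x C of each input patch
}
# filename_list rows are indexed with file[0], hence the nested lists
filename_list = [['/path/to/sample_01.tiff'], ['/path/to/sample_02.tiff']]
test(object_folder='/path/to/experiment/cv1',
     model_path='/path/to/experiment/cv1/checkpoint/model.ckpt-10',
     filename_list=filename_list,
     result_path='/path/to/results',
     flags=flags,
     igpu='0')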
Example #2
def calculate_mean_variance_image(object_folder, flags):
    key_values = list(flags['dict_path'].keys())
    key_values.remove('group')

    # Setup
    network_stats_file_path = os.path.join(object_folder, 'checkpoint',
                                           'network_stats.mat')
    routine.create_dir(os.path.join(object_folder, 'checkpoint'))
    mean_dict = {}

    for key in key_values:
        image_folder = os.path.join(object_folder, 'train', key)
        list_images = glob.glob(
            os.path.join(image_folder, '*' + flags['dict_ext'][key]))

        image = np.float32(KSimage.imread(list_images[0]))

        # initialise the running statistics with the first image
        mean_image = image
        variance_image = np.zeros(shape=image.shape, dtype=np.float32)

        for t, image_file in enumerate(list_images[1:]):
            image = np.float32(KSimage.imread(image_file))

            # incrementally update the running variance and mean; the variance
            # update uses the previous mean, so the mean is updated afterwards
            variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)
            mean_image = (np.float32(t + 1) * mean_image +
                          image) / np.float32(t + 2)

            print('calculate mean and variance: processing %d out of %d' %
                  (t + 2, len(list_images)))

        # store the final running statistics for this key
        mean_dict[key + '_mean'] = mean_image
        mean_dict[key + '_var'] = variance_image

    matlab.save(network_stats_file_path, mean_dict)
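The loop above keeps a streaming approximation of the variance. For reference, a minimal self-contained sketch of Welford's exact online mean/variance algorithm in plain NumPy; the function name is illustrative and not part of this codebase:

import numpy as np

def welford_mean_var(arrays):
    """Exact online mean/variance over an iterable of equal-shape arrays."""
    mean = None
    m2 = None  # running sum of squared deviations from the current mean
    n = 0
    for x in arrays:
        x = np.float64(x)
        n += 1
        if mean is None:
            mean = x
            m2 = np.zeros_like(x)
        else:
            delta = x - mean
            mean = mean + delta / n
            m2 = m2 + delta * (x - mean)  # uses the old and the new mean
    return mean, m2 / n  # population variance

# quick check against NumPy (illustrative):
data = [np.random.rand(4, 4) for _ in range(10)]
m, v = welford_mean_var(data)
assert np.allclose(m, np.mean(data, axis=0))
assert np.allclose(v, np.var(data, axis=0))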
Example #3
def main(nth_fold, mode, flags, testdir):
    """
    main trains, tests, or executes the model on the provided
    data based on the specified preferences

    param: nth_fold
    param: mode
    param: experiment_folder
    param: image_ext
    param: test_model
    param: test_image_list
    return: saves segmentation results to appropriate file/directory
    """

    # check if cv or perm
    list_dir = os.listdir(os.path.join(flags['experiment_folder']))
    if ('cv' + str(nth_fold) in list_dir) and ('perm' + str(nth_fold)
                                               in list_dir):
        raise ValueError('Dangerous! You have both cv and perm on the path.')
    elif 'cv' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'cv' + str(nth_fold))
    elif 'perm' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'perm' + str(nth_fold))
    else:
        raise ValueError('No cv or perm folder!')

    # Train model
    if mode == 'train':
        checkpoint_folder = os.path.join(object_folder, 'checkpoint')
        network_stats_file_path = os.path.join(checkpoint_folder,
                                               'network_stats.mat')

        train_images_folder = os.path.join(object_folder, 'train', 'image')

        if not os.path.isfile(network_stats_file_path):
            list_images = glob.glob(
                os.path.join(train_images_folder, '*' + flags['image_ext']))
            print('calculating mean and variance image')
            mean_image, variance_image = utils.calculate_mean_variance_image(
                list_images)
            routine.create_dir(checkpoint_folder)
            matlab.save(network_stats_file_path, {
                'mean_image': mean_image,
                'variance_image': variance_image
            })

        tf_model_train.train(object_folder, flags)

    # Test model on validation set
    elif mode == 'test_model':
        checkpointlist = glob.glob(
            os.path.join(object_folder, 'checkpoint', 'model*meta'))
        checkpointlist = [
            file for file in checkpointlist if 'pretrain' not in file
        ]
        temp = []
        for filepath in checkpointlist:
            basename = os.path.basename(filepath)
            temp.append(int(float(basename.split('-')[-1].split('.')[0])))
        temp = np.sort(temp)

        model_path = os.path.join(
            object_folder, 'checkpoint',
            'model.ckpt-' + str(temp[flags['test_model']]))
        print('use epoch %d : model %s' % (flags['test_model'], 'model.ckpt-' +
                                           str(temp[flags['test_model']])))
        test_images_list = flags['test_image_list']
        filename_list = KScsv.read_csv(test_images_list)
        tf_model_test.test(object_folder, model_path, filename_list, flags)

    # Segment WSIs
    elif mode == 'test_WSI':
        checkpointlist = glob.glob(
            os.path.join(object_folder, 'checkpoint', 'model*meta'))
        checkpointlist = [
            file for file in checkpointlist if 'pretrain' not in file
        ]
        temp = []
        for filepath in checkpointlist:
            basename = os.path.basename(filepath)
            temp.append(int(float(basename.split('-')[-1].split('.')[0])))
        temp = np.sort(temp)

        model_path = os.path.join(
            object_folder, 'checkpoint',
            'model.ckpt-' + str(temp[flags['test_model']]))
        print('use epoch %d : model %s' % (flags['test_model'], 'model.ckpt-' +
                                           str(temp[flags['test_model']])))

        # iterate over all WSI subdirectories
        paths = get_immediate_subdirectories(testdir)
        paths.sort()  # sort WSIs into ascending order
        #paths = paths[100:] #TODO: Enable based on which batch this code is running
        print("TEST DIR: " + str(testdir))

        for path in paths:
            print(os.path.join(testdir, path))
            if not os.path.isdir(
                    os.path.join(testdir, path + 'epiStromalSeg')
            ):  # skip WSIs whose output directory already exists
                tf_model_test.testWSI(object_folder, model_path,
                                      os.path.join(testdir, path), flags)

                #TODO: uncomment to process only controls
                #imageCSV = open(os.path.join('/data', 'avellal14', 'WSI_patches', 'BBD_NCC_Covariate_Outcome_KK_JH_modifiedWithPaths.csv'),'rb')
                #reader = csv.reader(imageCSV)
                #csvList = list(reader)
                #patientId = path[:path.index('_')]
                #caseControlList = next(subl for subl in csvList if patientId in subl)
                #TODO: uncomment to process only cases
                #if caseControlList[1] == '1':  # only test the WSI if the image is indeed a case (1)
                #    tf_model_test.testWSI(object_folder, model_path, os.path.join(testdir, path), flags)

    # Segment WSIs at patient level using data from a CSV
    elif mode == 'test_Case_Control':
        checkpointlist = glob.glob(
            os.path.join(object_folder, 'checkpoint', 'model*meta'))
        checkpointlist = [
            file for file in checkpointlist if 'pretrain' not in file
        ]
        temp = []
        for filepath in checkpointlist:
            basename = os.path.basename(filepath)
            temp.append(int(float(basename.split('-')[-1].split('.')[0])))
        temp = np.sort(temp)

        model_path = os.path.join(
            object_folder, 'checkpoint',
            'model.ckpt-' + str(temp[flags['test_model']]))
        print('use epoch %d : model %s' % (flags['test_model'], 'model.ckpt-' +
                                           str(temp[flags['test_model']])))

        with open(
                os.path.join('/home', 'avellal14', 'data', 'Adithya_BBD_NHS',
                             'NHS_BBD_CODE',
                             'casesAndMatchedControls224.csv')) as csvFile:
            csvReader = csv.DictReader(csvFile)
            for row in csvReader:
                if (row['path'] == 'BBD_NCC_extractedat20x'
                        or row['path'] == 'BBD_NCC_extractedat20x_round2'):
                    testdir = os.path.join('/home', 'avellal14', 'data',
                                           'Adithya_BBD_NHS', row['path'])
                    paths = get_subdirectories_by_patient(testdir, row['id'])

                    for path in paths:
                        print('CURRENT WSI BEING SEGMENTED',
                              os.path.join(testdir, path))
                        if not os.path.isdir(
                                os.path.join(testdir, path + '_cellSeg')
                        ):  # skip WSIs whose output directory already exists
                            tf_model_test.testWSI(object_folder, model_path,
                                                  os.path.join(testdir, path),
                                                  flags)
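The checkpoint-selection block is repeated verbatim in the three test modes above. A small consolidating helper, sketched under the same 'model.ckpt-<step>.meta' naming convention; the helper name is an assumption, not part of this codebase:

def select_checkpoint(object_folder, index):
    """Return the path of the index-th checkpoint (sorted by step),
    skipping any 'pretrain' checkpoints."""
    metas = glob.glob(os.path.join(object_folder, 'checkpoint', 'model*meta'))
    # 'model.ckpt-123.meta' -> 123, matching the parsing done above
    steps = sorted(
        int(float(os.path.basename(f).split('-')[-1].split('.')[0]))
        for f in metas if 'pretrain' not in f)
    return os.path.join(object_folder, 'checkpoint',
                        'model.ckpt-' + str(steps[index]))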
Example #4
def train(object_folder, flags):
    checkpoint_folder = os.path.join(object_folder, 'checkpoint')
    routine.create_dir(checkpoint_folder)

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        # define a graph.
        define_graph_output = define_graph(object_folder, checkpoint_folder,
                                           flags)

        # Create a saver.
        saver = tf.train.Saver(max_to_keep=0)

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:

            # Start the queue runners
            sess.run(init)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            # load checkpoint
            checkpoint_output = load_checkpoint(
                sess, saver, define_graph_output['curr_epoch'],
                checkpoint_folder, define_graph_output['parameters'], flags)

            # epoch
            num_examples_per_epoch_for_train = flags[
                'num_examples_per_epoch_for_train']
            num_examples_per_epoch_for_val = flags[
                'num_examples_per_epoch_for_val']

            nTrainBatches = int((num_examples_per_epoch_for_train /
                                 float(flags['batch_size'])) + 1)
            nValBatches = int((num_examples_per_epoch_for_val /
                               float(flags['batch_size'])) + 1)

            for epoch in xrange(sess.run(define_graph_output['curr_epoch']),
                                flags['num_epochs'] + 1):
                train_val_variables = {
                    'avg_train_loss': list(),
                    'avg_train_precision': list(),
                    'avg_train_recall': list(),
                    'avg_train_f1score': list(),
                    'avg_val_loss': list(),
                    'avg_val_precision': list(),
                    'avg_val_recall': list(),
                    'avg_val_f1score': list()
                }

                # Training loop
                training_loop(sess, define_graph_output, train_val_variables,
                              nTrainBatches, epoch, checkpoint_folder)

                # Validation loop
                validation_loop(sess, define_graph_output, train_val_variables,
                                nValBatches, epoch, checkpoint_folder)

                # Average loss on training and validation
                checkpoint_output = update_training_validation_variables(
                    train_val_variables, checkpoint_output, nTrainBatches,
                    nValBatches, epoch)

                # Save the model after each epoch.
                save_model(sess, saver, define_graph_output, checkpoint_folder,
                           checkpoint_output)

            coord.request_stop()
            coord.join(threads)
            plt.close()
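A note on the batch-count arithmetic above: int(n / float(batch_size)) + 1 rounds up, but it also adds an extra batch when n is an exact multiple of batch_size. If that extra batch is unintended, a sketch of exact ceiling division:

# exact ceiling division: rounds up, adds nothing on exact multiples
nTrainBatches = -(-num_examples_per_epoch_for_train // flags['batch_size'])
nValBatches = -(-num_examples_per_epoch_for_val // flags['batch_size'])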
Example #5
def test(object_folder, model_path, filename_list, flags):
    """
    test uses either whole image segmentation or patch based
    segmentation to segment an entire directory of test images

    param: object_folder
    param: model_path
    param: filename_list
    param: gpu
    param: gpu_memory_fraction
    return: writes segmentation result to appropriate image file
    """

    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])
    startTime = time.time()

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Placeholder for input patches
        images_test = tf.placeholder(tf.float32,
                                     shape=(np.hstack([
                                         flags['test_batch_size'],
                                         flags['size_input_patch']
                                     ])))

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = os.path.join(object_folder, 'result')
            routine.create_dir(result_dir)
            print("FILENAME LIST", filename_list)
            for iImage, file in enumerate(filename_list):
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename):
                    print('processing image %d/%d' %
                          (iImage + 1, len(filename_list)))
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)

                    KSimage.imwrite(result, savename)
            print("Total Time: " + str(time.time() - startTime))
Example #6
import os
import glob

import numpy as np
from KS_lib.prepare_data import routine
from KS_lib.image import KSimage

he_dir = 'HE'
he_cell_segmentation_result_path = 'Result'
he_dcis_segmentation_result_path = 'Result_tumour'

dict_path = {'he': he_dir}
dict_ext = {'he': '.tiff'}
gpu_list = ['0']

#######################################################################
# generate mask
mask_path = 'Mask'
routine.create_dir(mask_path)

files = glob.glob(os.path.join(dict_path['he'], '*' + dict_ext['he']))

for file in files:
    basename = os.path.basename(file)
    basename = os.path.splitext(basename)[0]
    savename = os.path.join(mask_path, basename + '.png')

    I = KSimage.imread(file)
    mask = 255 * np.ones(shape=(I.shape[0], I.shape[1]), dtype=np.uint8)
    KSimage.imwrite(mask, savename)

#######################################################################
# he cell segmentation
Modules.he_cell_segmentation(he_dir, dict_ext, mask_path,
Example #7
def train(object_folder, flags):
    """
    train defines the graph, then runs the training loop and validation loop and
    saves the model

    param: object_folder
    param: gpu
    param: gpu_memory_fraction
    param: num_examples_per_epoch_for_train
    param: num_examples_per_epoch_for_val
    param: batch_size
    param: num_epochs
    param: n_classes
    return: none
    """

    print("into train")
    checkpoint_folder = os.path.join(object_folder, 'checkpoint')
    routine.create_dir(checkpoint_folder)

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        # define a graph
        print("GRAPH DEFINED! Good to go!")
        define_graph_output = define_graph(object_folder, checkpoint_folder,
                                           flags)

        # create a saver
        saver = tf.train.Saver(max_to_keep=0)

        # build an initialization operation to run below
        init = tf.global_variables_initializer()

        # start running operations on the graph; GPU options must be set on
        # the config before the session is constructed to take effect
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'],
            allow_growth=True)
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # start the queue runners
            sess.run(init)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            print("before checkpoint output")
            # load checkpoint
            checkpoint_output = load_checkpoint(
                sess, saver, define_graph_output['curr_epoch'],
                checkpoint_folder, define_graph_output['parameters'], flags)

            print("after checkpoint output")
            # epoch
            num_examples_per_epoch_for_train = flags[
                'num_examples_per_epoch_for_train']
            num_examples_per_epoch_for_val = flags[
                'num_examples_per_epoch_for_val']

            nTrainBatches = int((num_examples_per_epoch_for_train /
                                 float(flags['batch_size'])) + 1)
            nValBatches = int((num_examples_per_epoch_for_val /
                               float(flags['batch_size'])) + 1)

            print("going into for loop where define_graph_output is called")
            for epoch in xrange(sess.run(define_graph_output['curr_epoch']),
                                flags['num_epochs'] + 1):
                print("not making it past the for statement in the loop")
                train_val_variables = {
                    'avg_train_loss': [],
                    'avg_train_precision':
                    [[] for iclass in xrange(flags['n_classes'])],
                    'avg_train_recall':
                    [[] for iclass in xrange(flags['n_classes'])],
                    'avg_train_f1score':
                    [[] for iclass in xrange(flags['n_classes'])],
                    'avg_val_loss': [],
                    'avg_val_precision':
                    [[] for iclass in xrange(flags['n_classes'])],
                    'avg_val_recall':
                    [[] for iclass in xrange(flags['n_classes'])],
                    'avg_val_f1score':
                    [[] for iclass in xrange(flags['n_classes'])]
                }
                print("GOES INTO TRAINING LOOP")

                # training loop
                training_loop(sess, define_graph_output, train_val_variables,
                              nTrainBatches, epoch, checkpoint_folder, flags)
                print("COMES OUT OF TRAINING LOOP")

                # validation loop
                validation_loop(sess, define_graph_output, train_val_variables,
                                nValBatches, epoch, checkpoint_folder, flags)

                # average loss on training and validation
                checkpoint_output = update_training_validation_variables(
                    train_val_variables, checkpoint_output, nTrainBatches,
                    nValBatches, epoch, flags)

                # save the model after each epoch
                save_model(sess, saver, define_graph_output, checkpoint_folder,
                           checkpoint_output)

            coord.request_stop()
            coord.join(threads)
            plt.close()
            print("out of train")