Example #1
def evaluate(object_folder):
    test_images_list = os.path.join(object_folder, 'test_images_list.csv')
    test_labels_list = os.path.join(object_folder, 'test_labels_list.csv')

    test_image_filenames = KScsv.read_csv(test_images_list)
    test_label_filenames = KScsv.read_csv(test_labels_list)

    all_prediction = list()
    all_label = list()
    f1score_per_image = list()
    all_score = list()
    for i_image, (image_file, label_file) in enumerate(
            zip(test_image_filenames, test_label_filenames)):

        tick = time.time()

        basename = os.path.basename(image_file[0])
        basename = os.path.splitext(basename)[0]
        image_file = os.path.join(object_folder, 'result', basename + '.mat')

        # Read in result and label
        mat_content = matlab.load(image_file)
        score = mat_content['mask']
        prediction = score > 0.5
        prediction = prediction.astype('float')

        label = KSimage.imread(label_file[0])
        label = label.astype('float')
        label = label / 255.0
        label = label > 0.5
        label = label.astype('float')

        prediction = np.reshape(prediction, -1)
        label = np.reshape(label, -1)
        score = np.reshape(score, -1)

        all_prediction.append(prediction)
        all_label.append(label)
        all_score.append(score)

        f1score = metrics.f1_score(label, prediction, average='binary')
        f1score_per_image.append(f1score)

        duration = time.time() - tick
        print('evaluate %d / %d (%.2f sec)' %
              (i_image + 1, len(test_image_filenames), duration))

    # concatenate the per-image vectors (unlike reshaping a stacked array,
    # this also handles images of different sizes)
    all_label = np.concatenate(all_label)
    all_prediction = np.concatenate(all_prediction)
    all_score = np.concatenate(all_score)

    total_f1score = metrics.f1_score(all_label,
                                     all_prediction,
                                     average='binary')
    avg_f1score = np.mean(f1score_per_image)
    average_precision = metrics.average_precision_score(all_label,
                                                        all_score,
                                                        average='micro')

    return total_f1score, avg_f1score, average_precision, f1score_per_image
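
# Worked sketch (illustrative, not from the original source): total_f1score
# pools all pixels before computing F1, while avg_f1score averages per-image
# scores, so the two generally differ; pooling weights large images more
# heavily. Assumes numpy as np and sklearn.metrics as metrics, as above.
def _f1_pooled_vs_averaged_demo():
    label_a = np.array([1, 1, 0, 0]); pred_a = np.array([1, 0, 0, 0])
    label_b = np.array([1, 0]);       pred_b = np.array([1, 0])
    per_image = [metrics.f1_score(label_a, pred_a),
                 metrics.f1_score(label_b, pred_b)]
    pooled = metrics.f1_score(np.concatenate([label_a, label_b]),
                              np.concatenate([pred_a, pred_b]))
    print(np.mean(per_image), pooled)  # 0.8333... vs 0.8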
Example #2
def test(object_folder, model_path, filename_list, result_path, flags, igpu):
    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    ###########################################################
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = igpu
    ###########################################################

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)
        # Place holder for patches
        images_test = tf.placeholder(tf.float32,
                                     shape=(np.hstack([
                                         flags['test_batch_size'],
                                         flags['size_input_patch']
                                     ])))
        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)
        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = result_path
            routine.create_dir(result_dir)

            for iImage, file in enumerate(filename_list):
                start_time = time.time()
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename):
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)
                    # matlab.save(savename,{'mask':result})
                    KSimage.imwrite(result, savename)
                duration = time.time() - start_time
                print(
                    'Finished segmenting DCIS regions on H&E image %d of %d (%.2f sec)'
                    % (iImage + 1, len(filename_list), duration))
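
# Sketch (illustrative, not from the original source): the CUDA environment
# variables set in test() take effect only if they are exported before
# TensorFlow initialises its CUDA context, so a helper like this must run
# before the first session is created.
def pin_gpu(igpu):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # enumerate GPUs by PCI bus id
    os.environ["CUDA_VISIBLE_DEVICES"] = igpu       # e.g. "0" or "0,1"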
Example #3
def testWSI(object_folder, model_path, directory, flags):
    """
    testWSI segments all of the WSIs in a given directory

    param: object_folder
    param: model_path
    param: directory
    param: gpu
    param: gpu_memory_fraction
    param: test_batch_size
    param: size_input_patch
    return: writes segmentation result to corresponding segmentation result directory
    """

    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])
    startTime = time.time()
    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Place holder for patches
        images_test = tf.placeholder(tf.float32, shape=(np.hstack([flags['test_batch_size'], flags['size_input_patch']])))

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            print("Current directory: " + str(directory))
            result_dir = directory + '_epiStromalSeg'
            create_dir(result_dir)
            filename_list = glob.glob(os.path.join(directory, '*.png'))  # collect every PNG in the directory
            print("Num Files: " + str(len(filename_list)))
            for file in filename_list:
                print(file)
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename) and not ('mask' in file or 'thumbnail' in file):
                    result = batch_processing(file, sess, logits_test, parameters, images_test, keep_prob, mean_image, variance_image, flags)
                    KSimage.imwrite(result, savename)

            print("Total Time: " + str(time.time() - startTime))
Example #4
# this example assumes the project's 'matlab' .mat I/O helper, plus:
import numpy as np
from scipy import ndimage
from scipy.ndimage import binary_erosion
from skimage.morphology import remove_small_objects
from skimage.segmentation import watershed  # skimage.morphology.watershed in older releases


def retouch_segmentation(file):
    """
    retouch_segmentation uses binary morphological operations (erode and dilate)
    to improve the segmentation result

    param: segmentation result
    return: retouched segmentation result
    """

    matcontent = matlab.load(file)
    mask = matcontent['mask']
    mask = np.squeeze(mask)

    # threshold
    binary_mask = mask > 0.8
    binary_mask_base = mask > 0.5

    # define disk structure
    radius = 5
    [x, y] = np.meshgrid(range(-radius, radius + 1),
                         range(-radius, radius + 1))
    z = np.sqrt(x**2 + y**2)
    structure = z < radius

    # binary erosion (cf. MATLAB's imerode)
    erode_mask = binary_erosion(binary_mask,
                                structure=structure,
                                border_value=1)
    erode_mask = remove_small_objects(erode_mask, 100)

    # watershed
    distance = ndimage.distance_transform_edt(binary_mask_base)
    markers = ndimage.label(erode_mask)[0]
    labels = watershed(-distance, markers, mask=binary_mask_base)

    return labels
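
# For intuition (illustrative, not from the original source): the disk
# structuring element built in retouch_segmentation(), shown for radius = 2.
def _disk_structure_demo(radius=2):
    x, y = np.meshgrid(range(-radius, radius + 1), range(-radius, radius + 1))
    print((np.sqrt(x**2 + y**2) < radius).astype(int))
    # [[0 0 0 0 0]
    #  [0 1 1 1 0]
    #  [0 1 1 1 0]
    #  [0 1 1 1 0]
    #  [0 0 0 0 0]]
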
Example #5
def define_graph(object_folder, checkpoint_folder, flags):
    # global step
    global_step = tf.Variable(0, trainable=False, name='global_step')

    # Epoch counter
    curr_epoch = tf.Variable(0, trainable=False, name='curr_epoch')
    update_curr_epoch = tf.assign(curr_epoch, tf.add(curr_epoch,
                                                     tf.constant(1)))

    # drop out
    keep_prob = tf.placeholder(tf.float32)

    # load network stats
    mat_contents = matlab.load(
        os.path.join(checkpoint_folder, 'network_stats.mat'))
    mean_img = np.float32(mat_contents['mean_image'])
    variance_img = np.float32(mat_contents['variance_image'])

    if mean_img.ndim == 2:
        mean_img = np.expand_dims(mean_img, axis=2)
    if variance_img.ndim == 2:
        variance_img = np.expand_dims(variance_img, axis=2)

    mean_image = tf.Variable(mean_img, trainable=False, name='mean_image')
    variance_image = tf.Variable(variance_img,
                                 trainable=False,
                                 name='variance_image')

    # Get images and labels.
    out_content_train = tf_model_input.inputs(mean_image, variance_image,
                                              object_folder, 'train', flags)
    out_content_val = tf_model_input.inputs(mean_image, variance_image,
                                            object_folder, 'val', flags)

    images_train = out_content_train['images']
    labels_train = out_content_train['labels']
    weights_train = out_content_train['weights']

    images_val = out_content_val['images']
    labels_val = out_content_val['labels']
    weights_val = out_content_val['weights']

    # Build a Graph that computes the logits predictions from the inference model.
    with tf.variable_scope("network") as scope:
        sigmoid_all_train, parameters = tf_model.inference(
            images_train, keep_prob, flags)
        scope.reuse_variables()
        sigmoid_all_val, _ = tf_model.inference(images_val, keep_prob, flags)

    # Calculate loss.
    loss_train = tf_model.loss(sigmoid_all_train, labels_train, weights_train,
                               curr_epoch)
    loss_val = tf_model.loss(sigmoid_all_val, labels_val, weights_val,
                             curr_epoch)

    # Accuracy train
    predict_train = tf.squeeze(tf.to_float(sigmoid_all_train > 0.25))
    actual_train = tf.squeeze(tf.to_float(labels_train > 0.25))
    accuracy_train_output = tf_model.accuracy(predict_train, actual_train)

    # Accuracy val
    predict_val = tf.squeeze(tf.to_float(sigmoid_all_val > 0.25))
    actual_val = tf.squeeze(tf.to_float(labels_val > 0.25))
    accuracy_val_output = tf_model.accuracy(predict_val, actual_val)

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = tf_model.train(loss_train['avg_cross_entropy_log_loss'],
                              global_step, parameters, flags)

    return {
        'global_step': global_step,
        'curr_epoch': curr_epoch,
        'update_curr_epoch': update_curr_epoch,
        'keep_prob': keep_prob,
        'loss_train': loss_train,
        'loss_val': loss_val,
        'predict_train': predict_train,
        'actual_train': actual_train,
        'predict_val': predict_val,
        'actual_val': actual_val,
        'train_op': train_op,
        'accuracy_train_output': accuracy_train_output,
        'accuracy_val_output': accuracy_val_output,
        'parameters': parameters,
        'out_content_train': out_content_train,
        'out_content_val': out_content_val,
        'sigmoid_all_train': sigmoid_all_train,
        'sigmoid_all_val': sigmoid_all_val
    }
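
# Minimal sketch (TF 1.x, not from the original source) of the weight-sharing
# pattern used in define_graph(): reuse_variables() makes the validation
# branch read the same variables as the training branch.
def _weight_sharing_demo():
    def tiny_net(x):
        w = tf.get_variable('w', shape=[], initializer=tf.ones_initializer())
        return x * w

    with tf.variable_scope('demo') as scope:
        y_train = tiny_net(tf.constant(2.0))
        scope.reuse_variables()        # second call shares 'demo/w'
        y_val = tiny_net(tf.constant(3.0))
    return y_train, y_val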
Example #6
def test(object_folder, model_path, filename_list, flags):
    """
    test uses either whole image segmentation or patch based
    segmentation to segment an entire directory of test images

    param: object_folder
    param: model_path
    param: filename_list
    param: gpu
    param: gpu_memory_fraction
    return: writes segmentation result to appropriate image file
    """

    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])
    startTime = time.time()

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Place holder for patches
        images_test = tf.placeholder(tf.float32,
                                     shape=(np.hstack([
                                         flags['test_batch_size'],
                                         flags['size_input_patch']
                                     ])))

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = os.path.join(object_folder, 'result')
            routine.create_dir(result_dir)
            print("FILENAME LIST", filename_list)
            for iImage, file in enumerate(filename_list):
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename):
                    print('processing image %d/%d' %
                          (iImage + 1, len(filename_list)))
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)

                    KSimage.imwrite(result, savename)
            print("Total Time: " + str(time.time() - startTime))
Example #7
def define_graph(object_folder, checkpoint_folder, flags):
    #keys = ['HE', 'DAPI', 'label']
    keys = list(flags['dict_path'].keys())
    keys.remove('group')

    # global step
    global_step = tf.Variable(0, trainable=False, name='global_step')

    # Epoch counter
    curr_epoch = tf.Variable(0, trainable=False, name='curr_epoch')
    update_curr_epoch = tf.assign(curr_epoch, tf.add(curr_epoch, tf.constant(1)))

    # drop out
    keep_prob = tf.placeholder(tf.float32)

    # random field brightness
    # random_field = tf.placeholder_with_default(
    #     np.ones(shape = (flags['size_input_patch'][0],flags['size_input_patch'][1],1), dtype = np.float32),
    #                               shape = (flags['size_input_patch'][0],flags['size_input_patch'][1],1))

    # network stats
    mat_contents = matlab.load(os.path.join(checkpoint_folder, 'network_stats.mat'))    

    # Get images and labels:
    # out_content_train - dict of image batches, e.g. 32 x 512 x 512 x 3
    # train_labels - dict of label vectors, e.g. shape (32,)
    out_content_train, train_labels = tf_model_input.inputs(object_folder, 'train', flags, mat_contents)
    out_content_val, val_labels = tf_model_input.inputs(object_folder, 'val', flags, mat_contents)

    images_train = out_content_train['HE']
    #labels_train = out_content_train['labels']
    #weights_train = out_content_train['weights']

    images_val = out_content_val['HE']
    #labels_val = out_content_val['labels']
    #weights_val = out_content_val['weights']
    # Build a Graph that computes the logits predictions from the inference model.
    # sigmoid_all_train - batch x classes, e.g. 32 x 4 for a 4-class model
    with tf.variable_scope("network") as scope:
        sigmoid_all_train, parameters = tf_model.inference(images_train, keep_prob, flags)
        scope.reuse_variables()
        sigmoid_all_val, _ = tf_model.inference(images_val, keep_prob, flags)
    
    # Get model weights
    counts = matlab.load(os.path.join(checkpoint_folder, 'counts.mat'))    

    # Calculate loss.
    loss_train = tf_model.loss(sigmoid_all_train, train_labels, curr_epoch, flags, counts)
    loss_val = tf_model.loss(sigmoid_all_val, val_labels, curr_epoch, flags, counts)

    # Accuracy train
    predict_train = tf.squeeze(tf.argmax(sigmoid_all_train, axis=1))
    actual_train = tf.squeeze(train_labels['HE'])
    accuracy_train_output = tf_model.accuracy(predict_train, actual_train, flags)

    # Accuracy val
    predict_val = tf.squeeze(tf.argmax(sigmoid_all_val, axis=1))
    actual_val = tf.squeeze(val_labels['HE'])
    accuracy_val_output = tf_model.accuracy(predict_val, actual_val, flags)

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = tf_model.train(loss_train['avg_cross_entropy_log_loss'], global_step, parameters, flags)

    return {'global_step': global_step,
            'curr_epoch': curr_epoch,
            'update_curr_epoch': update_curr_epoch,
            'keep_prob': keep_prob,
            'loss_train': loss_train,
            'loss_val': loss_val,
            'predict_train': predict_train,
            'actual_train': actual_train,
            'predict_val': predict_val,
            'actual_val': actual_val,
            'train_op': train_op,
            'accuracy_train_output': accuracy_train_output,
            'accuracy_val_output': accuracy_val_output,
            'parameters': parameters,
            'out_content_train': out_content_train,
            'out_content_val': out_content_val,
            'sigmoid_all_train': sigmoid_all_train,
            'sigmoid_all_val': sigmoid_all_val
            # 'random_field': random_field
            }
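
# Toy check (illustrative, not from the original source): the argmax/accuracy
# pattern above, computed with plain numpy on a 3-sample, 4-class batch
# (cf. sigmoid_all_train with shape batch x classes).
def _argmax_accuracy_demo():
    scores = np.array([[0.1, 0.7, 0.1, 0.1],
                       [0.6, 0.2, 0.1, 0.1],
                       [0.2, 0.2, 0.5, 0.1]])
    labels = np.array([1, 0, 2])
    pred = scores.argmax(axis=1)
    print((pred == labels).mean())  # 1.0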
Example #8
def test(object_folder, model_path, filename_list, flags):
    """
    test uses either whole image segmentation or patch based
    segmentation to segment an entire directory of test images

    param: object_folder
    param: model_path
    param: filename_list
    param: gpu
    param: gpu_memory_fraction
    return: writes segmentation result to appropriate image file
    """

    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    # collapse the 256 x 256 x 3 stats to one mean value per channel, shape (3,)
    mean_image_new = np.array([mean_image[:, :, 0].mean(), mean_image[:, :, 1].mean(), mean_image[:, :, 2].mean()])
    variance_image_new = np.array([variance_image[:, :, 0].mean(), variance_image[:, :, 1].mean(), variance_image[:, :, 2].mean()])

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Place holder for patches
        images_test = tf.placeholder(tf.float32)

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = os.path.join(object_folder, 'result')
            create_dir(result_dir)

            start_time = time.time()
            for iImage, file in enumerate(filename_list):
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')

                print('processing image %d/%d' % (iImage + 1, len(filename_list)))

                if not flags['use_patches']:
                    result = whole_image_processing(file, sess, logits_test,
                                                    parameters, images_test,
                                                    keep_prob, mean_image_new,
                                                    variance_image_new, flags)
                else:
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)

                # write the result back to an image file
                KSimage.imwrite(result, savename)
            print("Total duration: " + str(time.time() - start_time))
Example #9
def define_graph(object_folder, checkpoint_folder, flags):
    """
    define_graph builds the TensorFlow computational graph
    that represents the entire network

    param: object_folder
    param: checkpoint_folder
    param: flags
    return: train operation dictionary
    """

    print("into define graph")

    # global step
    global_step = tf.Variable(0, trainable=False, name='global_step')

    # epoch counter
    curr_epoch = tf.Variable(0, trainable=False, name='curr_epoch')
    update_curr_epoch = tf.assign(curr_epoch, tf.add(curr_epoch,
                                                     tf.constant(1)))

    # drop out
    keep_prob = tf.placeholder(tf.float32)

    # load network stats
    mat_contents = matlab.load(
        os.path.join(checkpoint_folder, 'network_stats.mat'))
    mean_img = np.float32(mat_contents['mean_image'])
    variance_img = np.float32(mat_contents['variance_image'])

    if mean_img.ndim == 2:
        mean_img = np.expand_dims(mean_img, axis=2)
    if variance_img.ndim == 2:
        variance_img = np.expand_dims(variance_img, axis=2)

    print("mean image success defined")
    mean_image = tf.Variable(mean_img, trainable=False, name='mean_image')
    print("variance image success defined")
    variance_image = tf.Variable(variance_img,
                                 trainable=False,
                                 name='variance_image')

    # get images and labels
    out_content_train = tf_model_input.inputs(mean_image, variance_image,
                                              object_folder, 'train', flags)
    out_content_val = tf_model_input.inputs(mean_image, variance_image,
                                            object_folder, 'val', flags)

    images_train = out_content_train['images']
    labels_train = out_content_train['labels']
    weights_train = out_content_train['weights']

    images_val = out_content_val['images']
    labels_val = out_content_val['labels']
    weights_val = out_content_val['weights']

    # build a graph that computes the logits predictions from the inference model.
    with tf.variable_scope("network") as scope:
        sigmoid_all_train, parameters = tf_model.inference(
            images_train, keep_prob, flags)
        scope.reuse_variables()
        sigmoid_all_val, _ = tf_model.inference(images_val, keep_prob, flags)

    loss_train = tf_model.loss(sigmoid_all_train, labels_train, weights_train,
                               curr_epoch, flags)
    loss_val = tf_model.loss(sigmoid_all_val, labels_val, weights_val,
                             curr_epoch, flags)

    # accuracy train: argmax over the class axis turns the one-hot
    # output back into a 256 x 256 class map
    predict_train = tf.squeeze(tf.argmax(sigmoid_all_train, axis=3))
    actual_train = tf.squeeze(labels_train)
    accuracy_train_output = tf_model.accuracy(predict_train, actual_train,
                                              flags)

    # accuracy val
    predict_val = tf.squeeze(tf.argmax(sigmoid_all_val, axis=3))
    actual_val = tf.squeeze(labels_val)
    accuracy_val_output = tf_model.accuracy(predict_val, actual_val, flags)

    # build a graph that trains the model with one batch of examples and updates the model parameters.
    train_op = tf_model.train(loss_train['avg_cross_entropy_log_loss'],
                              global_step, parameters, flags)
    print("out of define graph")
    return {
        'global_step': global_step,
        'curr_epoch': curr_epoch,
        'update_curr_epoch': update_curr_epoch,
        'keep_prob': keep_prob,
        'loss_train': loss_train,
        'loss_val': loss_val,
        'predict_train': predict_train,
        'actual_train': actual_train,
        'predict_val': predict_val,
        'actual_val': actual_val,
        'train_op': train_op,
        'accuracy_train_output': accuracy_train_output,
        'accuracy_val_output': accuracy_val_output,
        'parameters': parameters,
        'out_content_train': out_content_train,
        'out_content_val': out_content_val,
        'sigmoid_all_train': sigmoid_all_train,
        'sigmoid_all_val': sigmoid_all_val,
        'mean_image': mean_image,
        'variance_image': variance_image
    }
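
# Sketch (assumption, not from the original source): a plain-numpy version of
# the per-pixel standardisation that tf_model_input.inputs presumably applies
# with mean_image / variance_image.
def _standardise(img, mean_img, variance_img, eps=1e-8):
    return (img - np.float32(mean_img)) / np.sqrt(np.float32(variance_img) + eps)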