def read_data_test(filename, flags):
    stride = flags['stride_test']

    image = KSimage.imread(filename)
    ori_dim = image.shape
    image = KSimage.imresize(image, 0.25)  # downsample to 1/4 of the original size

    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)  # add a trailing channel dimension

    padrow = flags['size_input_patch'][0]
    padcol = flags['size_input_patch'][1]

    image = np.lib.pad(image, ((padrow, padrow), (padcol, padcol), (0, 0)),
                       'symmetric')

    # extract patches
    patches = ExtractPatches_test(flags['size_input_patch'], stride, image)

    ntimes_row = int(
        np.floor((image.shape[0] - flags['size_input_patch'][0]) /
                 float(stride[0])) + 1)
    ntimes_col = int(
        np.floor((image.shape[1] - flags['size_input_patch'][1]) /
                 float(stride[1])) + 1)
    rowRange = range(0, ntimes_row * stride[0], stride[0])
    colRange = range(0, ntimes_col * stride[1], stride[1])

    nPatches = len(rowRange) * len(colRange)

    return patches, image.shape, nPatches, ori_dim
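The patch count returned above follows directly from the padded image size, the patch size, and the stride: each axis contributes floor((dim - patch) / stride) + 1 window positions. A minimal stand-alone sketch of that arithmetic, using made-up sizes rather than the real flags values:

import numpy as np

def count_patches(image_shape, size_input_patch, stride):
    # mirrors the ntimes_row / ntimes_col logic in read_data_test
    ntimes_row = int(np.floor((image_shape[0] - size_input_patch[0]) / float(stride[0])) + 1)
    ntimes_col = int(np.floor((image_shape[1] - size_input_patch[1]) / float(stride[1])) + 1)
    return ntimes_row * ntimes_col

# hypothetical sizes: a 1024 x 1024 padded image, 256 x 256 patches, stride 128
print(count_patches((1024, 1024), (256, 256), (128, 128)))  # 7 * 7 = 49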
Example No. 2
def read_whole_image_test(filename, flags):
    """
    read_whole_image_test reads in a test image and its DCIS mask (a white
    placeholder mask is substituted when no mask file exists), resizes both
    from 10x to 2.5x, and returns them along with the resized image shape

    param: filename
    param: flags
    return: image, dcis_mask, image.shape
    """

    image = KSimage.imread(filename)
    basename = os.path.basename(filename)
    dcis_mask_file = os.path.join('experiment_dcis_segmentation', 'perm1',
                                  'result', basename)

    if os.path.exists(dcis_mask_file):
        dcis_mask = KSimage.imread(dcis_mask_file)
    else:
        dcis_mask = np.ones(shape=(image.shape[0], image.shape[1])) * 255.0
        dcis_mask = dcis_mask.astype(np.uint8)

    #Resizing from 10x to 2.5x
    image = KSimage.imresize(image, 0.25)
    dcis_mask = KSimage.imresize(dcis_mask, 0.25)

    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)  # add a trailing channel dimension

    if dcis_mask.ndim == 2:
        dcis_mask = np.expand_dims(dcis_mask, axis=2)  # add a trailing channel dimension

    return image, dcis_mask, image.shape
Example No. 3
def calculate_mean_variance_image(list_images):
    image = KSimage.imread(list_images[0])

    if np.random.randint(2, size=1) == 1:
        image = np.flipud(image)
    if np.random.randint(2, size=1) == 1:
        image = np.fliplr(image)
    image = np.float32(image)

    mean_image = image
    variance_image = np.zeros(shape=image.shape, dtype=np.float32)

    for t, image_file in enumerate(list_images[1:]):
        image = KSimage.imread(image_file)

        # image = np.dstack((image[:, :, 2], image[:, :, 1], image[:, :, 0]))

        if np.random.randint(2, size=1) == 1:
            image = np.flipud(image)
        if np.random.randint(2, size=1) == 1:
            image = np.fliplr(image)
        image = np.float32(image)

        mean_image = (np.float32(t + 1) * mean_image + image) / np.float32(t + 2)

        variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                         + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)

        print('calculate mean and variance: processing %d out of %d' %
              (t + 2, len(list_images)))
    return mean_image, variance_image
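Ignoring the random flips, the incremental update above is the standard running (population) mean/variance recursion, so it reproduces the batch per-pixel statistics of the whole stack. A quick self-contained check on fake data:

import numpy as np

stack = np.random.rand(5, 4, 4).astype(np.float32)  # five tiny fake "images"

mean_image = stack[0].copy()
variance_image = np.zeros_like(stack[0])
for t, image in enumerate(stack[1:]):
    mean_image = (np.float32(t + 1) * mean_image + image) / np.float32(t + 2)
    variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                     + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)

print(np.allclose(mean_image, stack.mean(axis=0), atol=1e-5))     # True
print(np.allclose(variance_image, stack.var(axis=0), atol=1e-4))  # True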
Example No. 4
def read_data_test(filename, flags, dcis_segmentation_result_path):
    stride = flags['stride_test']

    image = KSimage.imread(filename)

    files = [
        f for f in os.listdir(dcis_segmentation_result_path)
        if os.path.isfile(os.path.join(dcis_segmentation_result_path, f))
    ]

    basename = os.path.basename(filename)
    basename = os.path.splitext(basename)[0]
    pos = [m.start() for m in re.finditer('_', basename)]
    # basename = basename[0:pos[3] + 1]

    basename = [x for x in files if basename in x][0]

    dcis_mask_file = os.path.join(dcis_segmentation_result_path, basename)
    if os.path.exists(dcis_mask_file):
        dcis_mask = KSimage.imread(dcis_mask_file)
    else:
        dcis_mask = np.ones(shape=(image.shape[0], image.shape[1], 1)) * 255.0
        dcis_mask = dcis_mask.astype(np.uint8)

    image = KSimage.imresize(image, 2.0)
    dcis_mask = KSimage.imresize(dcis_mask, 2.0)

    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)  # add a trailing channel dimension

    if dcis_mask.ndim == 2:
        dcis_mask = np.expand_dims(dcis_mask, axis=2)  # add a trailing channel dimension

    padrow = flags['size_input_patch'][0]
    padcol = flags['size_input_patch'][1]

    image = np.lib.pad(image, ((padrow, padrow), (padcol, padcol), (0, 0)),
                       'symmetric')
    dcis_mask = np.lib.pad(dcis_mask,
                           ((padrow, padrow), (padcol, padcol), (0, 0)),
                           'symmetric')

    # extract patches
    patches = ExtractPatches_test(flags['size_input_patch'], stride, image)
    patches_mask = ExtractPatches_test(flags['size_input_patch'], stride,
                                       dcis_mask)

    ntimes_row = int(
        np.floor((image.shape[0] - flags['size_input_patch'][0]) /
                 float(stride[0])) + 1)
    ntimes_col = int(
        np.floor((image.shape[1] - flags['size_input_patch'][1]) /
                 float(stride[1])) + 1)
    rowRange = range(0, ntimes_row * stride[0], stride[0])
    colRange = range(0, ntimes_col * stride[1], stride[1])

    nPatches = len(rowRange) * len(colRange)

    return patches, patches_mask, image.shape, nPatches
Example No. 5
def test(object_folder, model_path, filename_list, result_path, flags, igpu):
    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    ###########################################################
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = igpu
    ###########################################################

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)
        # Place holder for patches
        images_test = tf.placeholder(tf.float32,
                                     shape=(np.hstack([
                                         flags['test_batch_size'],
                                         flags['size_input_patch']
                                     ])))
        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)
        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()  # initialize_all_variables is deprecated

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = result_path
            routine.create_dir(result_dir)

            for iImage, file in enumerate(filename_list):
                start_time = time.time()
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename):
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)
                    # matlab.save(savename,{'mask':result})
                    KSimage.imwrite(result, savename)
                duration = time.time() - start_time
                print(
                    'Finish segmenting DCIS regions on the H&E image of sample %d out of %d samples (%.2f sec)'
                    % (iImage + 1, len(filename_list), duration))
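The test routines above read only a handful of keys from the flags dictionary. A minimal illustrative configuration (the values below are placeholders, not the ones used in the actual experiments):

flags = {
    'gpu': '/gpu:0',
    'gpu_memory_fraction': 0.8,
    'test_batch_size': 32,
    'size_input_patch': [256, 256, 3],
    'size_output_patch': [256, 256, 2],
    'stride_test': [128, 128],
}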
Example No. 6
def read_data_test(filename, flags):
    """
    read_data_test reads in the test image, resizes it from 10x to 2.5x,
    loads the matching DCIS mask (or a white placeholder mask), and
    extracts patches from both

    param: filename
    param: flags (uses stride_test and size_input_patch)
    return: patches, patches_mask, image.shape, nPatches
    """

    stride = flags['stride_test']

    image = KSimage.imread(filename)
    image = KSimage.imresize(image, 0.25)  #Resizing from 10x to 2.5x
    basename = os.path.basename(filename)
    dcis_mask_file = os.path.join('experiment_epi_stromal_segmentation',
                                  'perm1', 'result', basename)
    if os.path.exists(dcis_mask_file):
        dcis_mask = KSimage.imread(dcis_mask_file)
    else:
        dcis_mask = np.ones(shape=(image.shape[0], image.shape[1])) * 255.0
        dcis_mask = dcis_mask.astype(np.uint8)

    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)  # add a trailing channel dimension

    if dcis_mask.ndim == 2:
        dcis_mask = np.expand_dims(dcis_mask, axis=2)  # add a trailing channel dimension

    padrow = flags['size_input_patch'][0]
    padcol = flags['size_input_patch'][1]

    image = np.lib.pad(image, ((padrow, padrow), (padcol, padcol), (0, 0)),
                       'symmetric')
    dcis_mask = np.lib.pad(dcis_mask,
                           ((padrow, padrow), (padcol, padcol), (0, 0)),
                           'symmetric')

    print("INPUT SHAPE: " + str(image.shape))

    # extract patches
    patches = ExtractPatches_test(flags['size_input_patch'], stride, image)
    patches_mask = ExtractPatches_test(flags['size_input_patch'], stride,
                                       dcis_mask)

    ntimes_row = int(
        np.floor((image.shape[0] - flags['size_input_patch'][0]) /
                 float(stride[0])) + 1)
    ntimes_col = int(
        np.floor((image.shape[1] - flags['size_input_patch'][1]) /
                 float(stride[1])) + 1)
    rowRange = range(0, ntimes_row * stride[0], stride[0])
    colRange = range(0, ntimes_col * stride[1], stride[1])

    nPatches = len(rowRange) * len(colRange)

    return patches, patches_mask, image.shape, nPatches
Example No. 7
def testWSI(object_folder, model_path, directory, flags):
    """
    testWSI segments all of the WSIs in a given directory

    param: object_folder
    param: model_path
    param: directory
    param: flags (uses gpu, gpu_memory_fraction, test_batch_size, size_input_patch)
    return: writes each segmentation result to the corresponding segmentation result directory
    """

    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])
    startTime = time.time()
    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Place holder for patches
        images_test = tf.placeholder(tf.float32, shape=(np.hstack([flags['test_batch_size'], flags['size_input_patch']])))

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)

        with tf.Session(config = config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            print("Current directory: " + str(directory))
            result_dir = os.path.join(directory +'_epiStromalSeg')
            create_dir(result_dir)
            filename_list = glob.glob(os.path.join(directory, '*.png'))  # collect all PNG files in the directory
            print("Num Files: " + str(len(filename_list)))
            for file in filename_list:
                print(file)
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename) and not ('mask' in file or 'thumbnail' in file):
                    result = batch_processing(file, sess, logits_test, parameters, images_test, keep_prob, mean_image, variance_image, flags)
                    KSimage.imwrite(result, savename)

            print("Total Time: " + str(time.time() - startTime))
Example No. 8
def evaluate(object_folder):
    test_images_list = os.path.join(object_folder, 'test_images_list.csv')
    test_labels_list = os.path.join(object_folder, 'test_labels_list.csv')

    test_image_filenames = KScsv.read_csv(test_images_list)
    test_label_filenames = KScsv.read_csv(test_labels_list)

    all_prediction = list()
    all_label = list()
    f1score_per_image = list()
    all_score = list()
    for i_image, (image_file, label_file) in enumerate(
            zip(test_image_filenames, test_label_filenames)):

        tick = time.time()

        basename = os.path.basename(image_file[0])
        basename = os.path.splitext(basename)[0]
        image_file = os.path.join(object_folder, 'result', basename + '.mat')

        # Read in result and label
        mat_content = matlab.load(image_file)
        score = mat_content['mask']
        prediction = score > 0.5
        prediction = prediction.astype('float')

        label = KSimage.imread(label_file[0])
        label = label.astype('float')
        label = label / 255.0
        label = label > 0.5
        label = label.astype('float')

        prediction = np.reshape(prediction, -1)
        label = np.reshape(label, -1)
        score = np.reshape(score, -1)

        all_prediction.append(prediction)
        all_label.append(label)
        all_score.append(score)

        f1score = metrics.f1_score(label, prediction, average='binary')
        f1score_per_image.append(f1score)

        duration = time.time() - tick
        print('evaluate %d / %d (%.2f sec)' %
              (i_image + 1, len(test_image_filenames), duration))

    all_label = np.reshape(np.array(all_label), -1)
    all_prediction = np.reshape(np.array(all_prediction), -1)
    all_score = np.reshape(np.array(all_score), -1)

    total_f1score = metrics.f1_score(all_label,
                                     all_prediction,
                                     average='binary')
    avg_f1score = np.mean(f1score_per_image)
    average_precision = metrics.average_precision_score(all_label,
                                                        all_score,
                                                        average='micro')

    return total_f1score, avg_f1score, average_precision, f1score_per_image
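Note that evaluate reports both the F1 score over all pixels pooled together (total_f1score) and the mean of the per-image F1 scores (avg_f1score); the two generally differ when images vary in size or class balance. A small illustration with fake data:

import numpy as np
from sklearn import metrics

# two fake "images" with very different sizes and accuracies
label_a, pred_a = np.array([1, 1, 0, 0]), np.array([1, 0, 0, 0])
label_b, pred_b = np.ones(100), np.ones(100)

per_image = [metrics.f1_score(label_a, pred_a), metrics.f1_score(label_b, pred_b)]
pooled = metrics.f1_score(np.concatenate([label_a, label_b]),
                          np.concatenate([pred_a, pred_b]))

print(np.mean(per_image))  # ~0.83 (average of 0.67 and 1.0)
print(pooled)              # ~0.995 (dominated by the larger image)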
Example No. 9
def calculate_mean_variance_image(object_folder, flags):
    key_values = list(flags['dict_path'].keys())
    key_values.remove('group')

    # Setup
    network_stats_file_path = os.path.join(object_folder, 'checkpoint',
                                           'network_stats.mat')
    routine.create_dir(os.path.join(object_folder, 'checkpoint'))
    mean_dict = {}

    for key in key_values:
        image_folder = os.path.join(object_folder, 'train', key)
        list_images = glob.glob(
            os.path.join(image_folder, '*' + flags['dict_ext'][key]))

        image = KSimage.imread(list_images[0])
        #if np.random.randint(2, size=1) == 1:
        #    image = np.flipud(image)
        #if np.random.randint(2, size=1) == 1:
        #    image = np.fliplr(image)
        image = np.float32(image)

        mean_image = image
        variance_image = np.zeros(shape=image.shape, dtype=np.float32)

        for t, image_file in enumerate(list_images[1:]):
            image = KSimage.imread(image_file)

            # image = np.dstack((image[:, :, 2], image[:, :, 1], image[:, :, 0]))

            #if np.random.randint(2, size=1) == 1:
            #    image = np.flipud(image)
            #if np.random.randint(2, size=1) == 1:
            #    image = np.fliplr(image)
            image = np.float32(image)

            mean_image = (np.float32(t + 1) * mean_image + image) / np.float32(t + 2)

            variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                             + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)

            print('calculate mean and variance: processing %d out of %d' %
                  (t + 2, len(list_images)))

        # store the final running statistics for this key
        mean_dict[key + '_mean'] = mean_image
        mean_dict[key + '_var'] = variance_image

    matlab.save(network_stats_file_path, mean_dict)
Example No. 10
def calculate_mean_variance_image(list_images):
    """
    calculate_mean_variance_image computes the per-pixel mean and
    variance over a list of images, updating both incrementally as
    each image is read

    param: list_images
    return: mean_image, variance_image
    """

    image = KSimage.imread(list_images[0])

    if np.random.randint(2, size=1) == 1:
        image = np.flipud(image)
    if np.random.randint(2, size=1) == 1:
        image = np.fliplr(image)
    image = np.float32(image)

    mean_image = image
    variance_image = np.zeros(shape=image.shape, dtype=np.float32)

    for t, image_file in enumerate(list_images[1:]):
        image = KSimage.imread(image_file)

        if np.random.randint(2, size=1) == 1:
            image = np.flipud(image)
        if np.random.randint(2, size=1) == 1:
            image = np.fliplr(image)
        image = np.float32(image)

        mean_image = (np.float32(t + 1) * mean_image + image) / np.float32(t + 2)

        variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                         + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)

        print('calculate mean and variance: processing %d out of %d' %
              (t + 2, len(list_images)))
    return mean_image, variance_image
Example No. 11
def gen_train_val_data(nth_fold, flags):
    """
    gen_train_val_data generates training and validation data for training the network. It builds
    directories for train and val, extracts patches according to the provided method, and
    maintains a log file listing the contents of all the data splits

    param: nth_fold
    param: flags (uses gen_train_val_method, e.g. 'sliding_window')
    return: void
    """

    ########## check whether 'cv' or 'perm' exists and which one to use ##########
    list_dir = os.listdir(os.path.join(flags['experiment_folder']))
    if ('cv' + str(nth_fold) in list_dir) and ('perm' + str(nth_fold)
                                               in list_dir):
        raise ValueError('Dangerous! You have both cv and perm on the path.')
    elif 'cv' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'cv' + str(nth_fold))
    elif 'perm' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'perm' + str(nth_fold))
    else:
        raise ValueError('No cv or perm folder!')

    ########## create train and val paths ##########
    path_dict = dict()
    path_dict['train_folder'] = os.path.join(object_folder, 'train')
    path_dict['val_folder'] = os.path.join(object_folder, 'val')
    create_dir(path_dict['train_folder'])
    create_dir(path_dict['val_folder'])

    print("Gets to the beginning of an if statement")
    ########## extract patches and put in a designated directory ##########
    if flags['gen_train_val_method'] == 'sliding_window':

        key_list = ['image', 'groundtruth', 'weight']

        for key in key_list:
            path_dict['train_' + key + '_folder'] = os.path.join(
                path_dict['train_folder'], key)
            create_dir(path_dict['train_' + key + '_folder'])
            path_dict['val_' + key + '_folder'] = os.path.join(
                path_dict['val_folder'], key)
            create_dir(path_dict['val_' + key + '_folder'])

        list_dict = dict()
        for key in key_list:
            list_dict['train_' + key + '_list'] = KScsv.read_csv(
                os.path.join(object_folder, 'train_' + key + '_list.csv'))
            list_dict['val_' + key + '_list'] = KScsv.read_csv(
                os.path.join(object_folder, 'val_' + key + '_list.csv'))

        ########## train ##########
        for key in ['train', 'val']:
            if not os.path.isfile(
                    os.path.join(path_dict[key + '_folder'],
                                 key + '_log.csv')):
                log_data = list()

                for i_image in range(len(list_dict[key + '_image_list'])):

                    tic = time.time()

                    path_image = list_dict[key + '_image_list'][i_image][0]
                    path_groundtruth = list_dict[
                        key + '_groundtruth_list'][i_image][0]
                    path_weight = list_dict[key + '_weight_list'][i_image][0]

                    #Resize image, groundtruth, and weight from 10x input size to 2.5x (level at which network operates)
                    image = KSimage.imread(path_image)
                    image = KSimage.imresize(image, 0.25)

                    groundtruth = KSimage.imread(path_groundtruth)
                    groundtruth = KSimage.imresize(groundtruth, 0.25)

                    weight = KSimage.imread(path_weight)
                    weight = KSimage.imresize(weight, 0.25)

                    #make sure that groundtruth images have depth = 1
                    if (len(groundtruth.shape) > 2
                            and groundtruth.shape[2] > 1):
                        groundtruth = groundtruth[:, :, 1]

                    # remove all intra-stromal epithelium labels and set them to stroma
                    groundtruth[groundtruth == 3] = 2
                    # the fat label was originally 4 but is now changed to 3
                    groundtruth[groundtruth == 4] = 3

                    dict_obj = {
                        'image': image,
                        'groundtruth': groundtruth,
                        'weight': weight
                    }

                    extractor = extract_patches.sliding_window(
                        dict_obj, flags['size_input_patch'],
                        flags['size_output_patch'], flags['stride'])

                    for j, (out_obj_dict, coord_dict) in enumerate(extractor):
                        images = out_obj_dict['image']
                        groundtruths = out_obj_dict['groundtruth']
                        weights = out_obj_dict['weight']
                        coord_images = coord_dict['image']

                        #############################################################

                        basename = os.path.basename(path_image)
                        basename = os.path.splitext(basename)[0]

                        image_name = os.path.join(
                            path_dict[key + '_image_folder'], basename +
                            '_idx' + str(j) + '_row' + str(coord_images[0]) +
                            '_col' + str(coord_images[1]) + flags['image_ext'])
                        label_name = os.path.join(
                            path_dict[key + '_groundtruth_folder'],
                            basename + '_idx' + str(j) + '_row' +
                            str(coord_images[0]) + '_col' +
                            str(coord_images[1]) + flags['groundtruth_ext'])
                        weight_name = os.path.join(
                            path_dict[key + '_weight_folder'],
                            basename + '_idx' + str(j) + '_row' +
                            str(coord_images[0]) + '_col' +
                            str(coord_images[1]) + flags['weight_ext'])

                        if not os.path.isfile(image_name):
                            KSimage.imwrite(images, image_name)

                        if not os.path.isfile(label_name):
                            KSimage.imwrite(groundtruths, label_name)

                        if not os.path.isfile(weight_name):
                            KSimage.imwrite(weights, weight_name)

                        log_data.append((image_name, label_name, weight_name))

                    print('finish processing %d image from %d images : %.2f' %
                          (i_image + 1, len(list_dict[key + '_image_list']),
                           time.time() - tic))

                KScsv.write_csv(
                    log_data,
                    os.path.join(path_dict[key + '_folder'], key + '_log.csv'))

    ####################################################################################################################
    else:
        print("Only the 'sliding_window' method is supported. Training terminated.")
        return
Example No. 12
def batch_processing(filename, sess, logits_test, parameters, images_test,
                     keep_prob, mean_image, variance_image, flags,
                     he_dcis_segmentation_path):
    # Read image and extract patches
    patches, patches_mask, image_size, nPatches, ori_dim = tf_model_input_test.read_data_test(
        filename, flags, he_dcis_segmentation_path)

    def batches(generator, size):
        source = generator
        while True:
            chunk = [val for _, val in izip(xrange(size), source)]
            if not chunk:
                raise StopIteration
            yield chunk

    # Construct batch indices
    batch_index = range(0, nPatches, flags['test_batch_size'])
    if nPatches not in batch_index:
        batch_index.append(nPatches)

    # Process all_patches
    shape = np.hstack([nPatches, flags['size_output_patch']])
    shape[-1] = logits_test.get_shape()[3].value
    all_patches = np.zeros(shape, dtype=np.float32)

    for ipatch, chunk in enumerate(
            zip(batches(patches, flags['test_batch_size']),
                batches(patches_mask, flags['test_batch_size']))):
        # for ipatch in range(len(batch_index) - 1):
        #
        # start_time = time.time()
        start_idx = batch_index[ipatch]
        end_idx = batch_index[ipatch + 1]

        tmp = list()
        for i in range(len(chunk[1])):
            tmp.append(np.sum(chunk[1][i] == 255.0) / float(chunk[1][i].size))

        if np.any(np.array(tmp) > 0.0):
            # temp = tf_model_input_test.inputs_test(patches[start_idx:end_idx, :, :, :], mean_image, variance_image)
            temp = tf_model_input_test.inputs_test(chunk[0], mean_image,
                                                   variance_image)

            if temp.shape[0] < flags['test_batch_size']:
                rep = np.tile(
                    temp[-1, :, :, :],
                    [flags['test_batch_size'] - temp.shape[0], 1, 1, 1])
                temp = np.vstack([temp, rep])

            pred, paras = sess.run([logits_test, parameters],
                                   feed_dict={
                                       images_test: temp,
                                       keep_prob: 1.0
                                   })
            # expand single pixel prediction into patch
            # pred = np.lib.pad(pred, ((0, 0), (int(flags.size_output_patch[0]/2.0), int(flags.size_output_patch[0]/2.0)-1),
            #                           (int(flags.size_output_patch[0]/2.0), int(flags.size_output_patch[0]/2.0) - 1), (0, 0)), 'edge')

        else:
            shape = np.hstack(
                [flags['test_batch_size'], flags['size_output_patch']])
            shape[-1] = logits_test.get_shape()[3].value
            pred = np.zeros(shape, dtype=np.float32)
            for j in range(flags['test_batch_size']):
                x = pred[j, :, :, :]
                x[:, :, 0] = 1.0
                pred[j, :, :, :] = x

        all_patches[start_idx:end_idx, :, :, :] = pred[
            range(end_idx - start_idx), :, :, :]

        # duration = time.time() - start_time
        # print('processing step %d/%d (%.2f sec/step)' % (ipatch + 1, len(batch_index) - 1, duration))

    result = tf_model_input_test.MergePatches_test(
        all_patches, flags['stride_test'], image_size,
        flags['size_input_patch'], flags['size_output_patch'], flags)

    result = result * 255.0
    result = result.astype(np.uint8)
    result = KSimage.imresize(result, 2.0)
    result = np.argmax(result, axis=2)

    # resize may not preserve the original dimensions of the image
    # append with zero or remove excessive pixels in each dimension
    if result.shape[0] < ori_dim[0]:
        result = np.pad(result, ((0, ori_dim[0] - result.shape[0]), (0, 0)),
                        'constant',
                        constant_values=0)
    else:
        result = result[0:ori_dim[0], :]

    if result.shape[1] < ori_dim[1]:
        result = np.pad(result, ((0, 0), (0, ori_dim[1] - result.shape[1])),
                        'constant',
                        constant_values=0)
    else:
        result = result[:, 0:ori_dim[1]]

    im_in = result == 1
    im_in = im_in * 255.0
    im_in = im_in.astype(np.uint8)

    im_out = KSimage.imfill(im_in)
    im_out = im_out * 255.0
    im_out = im_out.astype(np.uint8)

    return im_out
Example No. 13
def test(object_folder, model_path, filename_list, flags):
    """
    test uses either whole-image or patch-based segmentation to
    segment an entire directory of test images

    param: object_folder
    param: model_path
    param: filename_list
    param: flags (uses gpu and gpu_memory_fraction)
    return: writes each segmentation result to an image file in the result directory
    """

    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])
    startTime = time.time()

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Place holder for patches
        images_test = tf.placeholder(tf.float32,
                                     shape=(np.hstack([
                                         flags['test_batch_size'],
                                         flags['size_input_patch']
                                     ])))

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()  # initialize_all_variables is deprecated

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = os.path.join(object_folder, 'result')
            routine.create_dir(result_dir)
            print("FILENAME LIST", filename_list)
            for iImage, file in enumerate(filename_list):
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                if not os.path.exists(savename):
                    print('processing image %d/%d' %
                          (iImage + 1, len(filename_list)))
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)

                    KSimage.imwrite(result, savename)
            print("Total Time: " + str(time.time() - startTime))
Example No. 14
he_dcis_segmentation_result_path = os.path.join('Result_tumour')

dict_path = {'he': he_dir}
dict_ext = {'he': '.tiff'}
gpu_list = ['0']

#######################################################################
# generate mask
mask_path = 'Mask'
routine.create_dir(mask_path)

files = glob.glob(os.path.join(dict_path['he'], '*' + dict_ext['he']))

for file in files:
    basename = os.path.basename(file)
    basename = os.path.splitext(basename)[0]
    savename = os.path.join(mask_path, basename + '.png')

    I = KSimage.imread(file)
    mask = 255 * np.ones(shape=(I.shape[0], I.shape[1]), dtype=np.uint8)
    KSimage.imwrite(mask, savename)

#######################################################################
# he cell segmentation
Modules.he_cell_segmentation(he_dir, dict_ext, mask_path,
                             he_cell_segmentation_result_path, gpu_list)

# he tumour_seg
Modules.he_dcis_segmentation(he_dir, dict_ext,
                             he_dcis_segmentation_result_path, gpu_list)
Example No. 15
    # Perform Color Deconvolution of Source Image to get stain concentration matrix
    C, M_source = deconvolve(source, M_source, I0_source)

    # Vectorize to Nx3 matrix
    C = C.reshape(-1, 3)

    # Find the 99th percentile of stain concentration(for each channel)
    max_C_source = np.percentile(a=C, q=99, axis=0)

    # main normalisation
    C = C / max_C_source
    C = C * max_C_target

    # Reconstruct the RGB image
    norm = I0_source * np.exp(np.matmul(C, -M_target)) - 1
    norm = norm.reshape(h, w, 3)
    norm = norm.clip(0, I0_source).astype(source.dtype)

    return norm


#####################################################################
target = KSimage.imread('1_421_1_2_7_999_11.jpg')
source = KSimage.imread('b001.tif')

norm = stain_normalisation_macenko(source, target)

KSimage.imwrite(norm, 'result.tiff')

print('done')
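The reconstruction line above (I0_source * np.exp(np.matmul(C, -M_target)) - 1) is the inverse of the Beer-Lambert conversion to optical density that the deconvolve step presumably applies. A minimal sketch of that forward/inverse pair, assuming the +1 offset convention implied by the reconstruction (rgb_to_od and od_to_rgb are illustrative helpers, not part of the code above):

import numpy as np

def rgb_to_od(I, I0=255.0):
    # Beer-Lambert: optical density from transmitted intensity
    return -np.log((np.asarray(I, dtype=np.float64) + 1.0) / I0)

def od_to_rgb(OD, I0=255.0):
    # inverse transform; mirrors the reconstruction line above
    return I0 * np.exp(-np.asarray(OD, dtype=np.float64)) - 1.0

pixel = np.array([120.0, 80.0, 200.0])
print(np.allclose(od_to_rgb(rgb_to_od(pixel)), pixel))  # True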
Example No. 16
def batch_processing(filename, sess, logits_test, parameters, images_test, keep_prob, mean_image, variance_image, flags):
    """
    batch_processing reads in an image, splits it up into patches,
    preprocesses the patches, passes them through the network, and returns
    the results

    param: filename
    param: sess
    param: logits_test
    param: parameters
    param: images_test
    param: keep_prob
    param: mean_image
    param: variance_image
    param: flags (uses test_batch_size, size_output_patch, stride_test)
    return: result
    """

    # Read image and extract patches
    patches, patches_mask, image_size, nPatches = tf_model_input_test.read_data_test(filename, flags)

    def batches(generator, size):
        source = generator
        while True:
            chunk = [val for _, val in izip(xrange(size), source)]
            if not chunk:
                raise StopIteration
            yield chunk

    # Construct batch indices
    batch_index = range(0, nPatches, flags['test_batch_size'])
    if nPatches not in batch_index:
        batch_index.append(nPatches)

    # Process all_patches
    shape = np.hstack([nPatches, flags['size_output_patch']])
    shape[-1] = logits_test.get_shape()[3].value
    all_patches = np.zeros(shape, dtype=np.float32)

    for ipatch, chunk in enumerate(zip(batches(patches, flags['test_batch_size']),
                                       batches(patches_mask, flags['test_batch_size']))):
        start_time = time.time()
        start_idx = batch_index[ipatch]
        end_idx = batch_index[ipatch + 1]

        tmp = list()
        for i in range(len(chunk[1])):
            tmp.append(np.sum(chunk[1][i]==255.0)/float(chunk[1][i].size))

        # process the batch only if any patch in it has more than 50% of its area uncovered by the mask (white = uncovered)
        if np.any(np.array(tmp) > 0.5):
            temp = tf_model_input_test.inputs_test(chunk[0], mean_image, variance_image)

            if temp.shape[0] < flags['test_batch_size']:
                rep = np.tile(temp[-1, :, :, :], [flags['test_batch_size'] - temp.shape[0], 1, 1, 1])
                temp = np.vstack([temp, rep])

            pred, paras = sess.run([logits_test, parameters], feed_dict={images_test: temp, keep_prob: 1.0})

        else:
            shape = np.hstack([flags['test_batch_size'], flags['size_output_patch']])
            shape[-1] = logits_test.get_shape()[3].value
            pred = np.zeros(shape, dtype=np.float32)
            for j in range(flags['test_batch_size']):
                x = pred[j,:,:,:]
                x[:,:,0] = 1.0
                pred[j,:,:,:] = x

        all_patches[start_idx:end_idx, :, :, :] = pred[range(end_idx - start_idx), :, :, :]

        duration = time.time() - start_time
        print('processing step %d/%d (%.2f sec/step)' % (ipatch + 1, len(batch_index) - 1, duration))

    # merge all patches back into a full-size prediction (TODO: expose a per-patch confidence score)
    result = tf_model_input_test.MergePatches_test(all_patches, flags['stride_test'], image_size, flags['size_input_patch'], flags['size_output_patch'], flags)

    result = np.squeeze(result)  # drop singleton dimensions without a TF round-trip

    result = result * 255.0
    result = result.astype(np.uint8)
    result = np.argmax(result, axis=2)  # most probable class per pixel
    result = KSimage.imresize(result, 4.0)
 
    return result 
Example No. 17
def batch_processing(filename, sess, logits_test, parameters, images_test,
                     keep_prob, mean_image, variance_image, flags):
    # Read image and extract patches
    patches, image_size, nPatches, ori_dim = tf_model_input_test.read_data_test(
        filename, flags)

    def batches(generator, size):
        source = generator
        while True:
            chunk = [val for _, val in izip(xrange(size), source)]
            if not chunk:
                raise StopIteration
            yield chunk

    # Construct batch indices
    batch_index = range(0, nPatches, flags['test_batch_size'])
    if nPatches not in batch_index:
        batch_index.append(nPatches)

    # Process all_patches
    shape = np.hstack([nPatches, flags['size_output_patch']])
    shape[-1] = logits_test.get_shape()[3].value
    all_patches = np.zeros(shape, dtype=np.float32)

    for ipatch, chunk in enumerate(batches(patches, flags['test_batch_size'])):
        start_idx = batch_index[ipatch]
        end_idx = batch_index[ipatch + 1]

        temp = tf_model_input_test.inputs_test(chunk, mean_image,
                                               variance_image)

        if temp.shape[0] < flags['test_batch_size']:
            rep = np.tile(temp[-1, :, :, :],
                          [flags['test_batch_size'] - temp.shape[0], 1, 1, 1])
            temp = np.vstack([temp, rep])

        pred = sess.run(logits_test,
                        feed_dict={
                            images_test: temp,
                            keep_prob: 1.0
                        })
        all_patches[start_idx:end_idx, :, :, :] = pred[
            range(end_idx - start_idx), :, :, :]

    result = tf_model_input_test.MergePatches_test(
        all_patches, flags['stride_test'], image_size,
        flags['size_input_patch'], flags['size_output_patch'], flags)
    result = result * 255.0
    result = result.astype(np.uint8)
    result = KSimage.imresize(result, 4.0)
    result = np.argmax(result, axis=2)

    # resize may not preserve the original dimensions of the image
    # append with zero or remove excessive pixels in each dimension
    if result.shape[0] < ori_dim[0]:
        result = np.pad(result, ((0, ori_dim[0] - result.shape[0]), (0, 0)),
                        'constant',
                        constant_values=0)
    else:
        result = result[0:ori_dim[0], :]

    if result.shape[1] < ori_dim[1]:
        result = np.pad(result, ((0, 0), (0, ori_dim[1] - result.shape[1])),
                        'constant',
                        constant_values=0)
    else:
        result = result[:, 0:ori_dim[1]]

    mask = result == 1
    mask = mask.astype(np.uint8) * 255
    im, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

    temp_mask = np.zeros(mask.shape[:2], dtype='uint8')
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 500**2:
            cv2.drawContours(temp_mask, [cnt], -1, 255, -1)

    result = temp_mask

    return result
Example No. 18
def test(object_folder, model_path, filename_list, flags):
    """
    test uses either whole-image or patch-based segmentation to
    segment an entire directory of test images

    param: object_folder
    param: model_path
    param: filename_list
    param: flags (uses gpu, gpu_memory_fraction, and use_patches)
    return: writes each segmentation result to an image file in the result directory
    """

    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    # collapse the 256 x 256 x 3 mean/variance images into per-channel values (a length-3 vector each)
    mean_image_new = np.array([mean_image[:, :, 0].mean(), mean_image[:, :, 1].mean(), mean_image[:, :, 2].mean()])
    variance_image_new = np.array([variance_image[:, :, 0].mean(), variance_image[:, :, 1].mean(), variance_image[:, :, 2].mean()])

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Place holder for patches
        images_test = tf.placeholder(tf.float32)

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = os.path.join(object_folder, 'result')
            create_dir(result_dir)

            start_time = time.time()
            for iImage, file in enumerate(filename_list):
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')

                print('processing image %d/%d' % (iImage + 1, len(filename_list)))
                print("Processing file: " + str(file))

                if not flags['use_patches']:
                    result = whole_image_processing(file, sess, logits_test, parameters, images_test, keep_prob, mean_image_new, variance_image_new, flags)
                else:
                    result = batch_processing(file, sess, logits_test, parameters, images_test, keep_prob, mean_image, variance_image, flags)

                print("Image processed")
                KSimage.imwrite(result, savename)  # write the segmentation result back to an image file

            print("Total Time: " + str(time.time() - start_time))