Code Example #1
def read_whole_image_test(filename, flags):
    """
    read_whole_image_test reads in the test image, resizes it
    from 10x to 2.5x, and returns it along with its shape

    param: filename
    param: flags (unused in this variant)
    return: image, dcis_mask, image.shape
    """

    image = KSimage.imread(filename)
    basename = os.path.basename(filename)
    dcis_mask_file = os.path.join('experiment_dcis_segmentation', 'perm1',
                                  'result', basename)

    if os.path.exists(dcis_mask_file):
        dcis_mask = KSimage.imread(dcis_mask_file)
    else:
        dcis_mask = np.ones(shape=(image.shape[0], image.shape[1])) * 255.0
        dcis_mask = dcis_mask.astype(np.uint8)

    #Resizing from 10x to 2.5x
    image = KSimage.imresize(image, 0.25)
    dcis_mask = KSimage.imresize(dcis_mask, 0.25)

    # give grayscale inputs an explicit channel axis (H x W -> H x W x 1)
    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)

    if dcis_mask.ndim == 2:
        dcis_mask = np.expand_dims(dcis_mask, axis=2)

    return image, dcis_mask, image.shape
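
A quick aside on the channel axis: expand_dims is what lets grayscale inputs flow through the same H x W x C code path as RGB ones. A minimal self-contained illustration (plain numpy, no project code):

import numpy as np

gray = np.zeros((512, 512), dtype=np.uint8)   # H x W grayscale image
gray = np.expand_dims(gray, axis=2)           # -> (512, 512, 1), i.e. H x W x C
print(gray.shape)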
Code Example #2
def calculate_mean_variance_image(list_images):
    image = KSimage.imread(list_images[0])

    # randomly flip so the statistics also cover flipped (augmented) variants
    if np.random.randint(2, size=1) == 1:
        image = np.flipud(image)
    if np.random.randint(2, size=1) == 1:
        image = np.fliplr(image)
    image = np.float32(image)

    mean_image = image
    variance_image = np.zeros(shape=image.shape, dtype=np.float32)

    for t, image_file in enumerate(list_images[1:]):
        image = KSimage.imread(image_file)

        # image = np.dstack((image[:, :, 2], image[:, :, 1], image[:, :, 0]))

        if np.random.randint(2, size=1) == 1:
            image = np.flipud(image)
        if np.random.randint(2, size=1) == 1:
            image = np.fliplr(image)
        image = np.float32(image)

        # incremental update: running mean and population variance over the t+2 images seen so far
        mean_image = (np.float32(t + 1) * mean_image + image) / np.float32(t + 2)

        variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                         + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)

        print('calculate mean and variance: processing %d out of %d' %
              (t + 2, len(list_images)))
    return mean_image, variance_image
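
The loop above maintains a streaming per-pixel mean and population variance, so the whole image stack never has to sit in memory at once. A self-contained check of the same recurrences against numpy's batch statistics (synthetic arrays stand in for the images; the random flips are left out since they do not affect the recurrence itself):

import numpy as np

rng = np.random.default_rng(0)
stack = rng.random((10, 4, 4)).astype(np.float32)  # ten synthetic "images"

mean = stack[0].copy()
var = np.zeros_like(mean)
for t, x in enumerate(stack[1:]):
    mean = ((t + 1) * mean + x) / (t + 2)
    var = (t + 1) / (t + 2) * var + (x - mean) ** 2 / (t + 1)

assert np.allclose(mean, stack.mean(axis=0), atol=1e-5)
assert np.allclose(var, stack.var(axis=0), atol=1e-5)  # population variance (ddof=0)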
Code Example #3
def read_data_test(filename, flags, dcis_segmentation_result_path):
    stride = flags['stride_test']

    image = KSimage.imread(filename)

    files = [
        f for f in os.listdir(dcis_segmentation_result_path)
        if os.path.isfile(os.path.join(dcis_segmentation_result_path, f))
    ]

    basename = os.path.basename(filename)
    basename = os.path.splitext(basename)[0]
    pos = [m.start() for m in re.finditer('_', basename)]
    # basename = basename[0:pos[3] + 1]

    # pick the first segmentation-result file whose name contains this basename
    basename = [x for x in files if basename in x][0]

    dcis_mask_file = os.path.join(dcis_segmentation_result_path, basename)
    if os.path.exists(dcis_mask_file):
        dcis_mask = KSimage.imread(dcis_mask_file)
    else:
        dcis_mask = np.ones(shape=(image.shape[0], image.shape[1], 1)) * 255.0
        dcis_mask = dcis_mask.astype(np.uint8)

    image = KSimage.imresize(image, 2.0)
    dcis_mask = KSimage.imresize(dcis_mask, 2.0)

    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)

    if dcis_mask.ndim == 2:
        dcis_mask = np.expand_dims(dcis_mask, axis=2)

    padrow = flags['size_input_patch'][0]
    padcol = flags['size_input_patch'][1]

    image = np.lib.pad(image, ((padrow, padrow), (padcol, padcol), (0, 0)),
                       'symmetric')
    dcis_mask = np.lib.pad(dcis_mask,
                           ((padrow, padrow), (padcol, padcol), (0, 0)),
                           'symmetric')

    # extract patches
    patches = ExtractPatches_test(flags['size_input_patch'], stride, image)
    patches_mask = ExtractPatches_test(flags['size_input_patch'], stride,
                                       dcis_mask)

    ntimes_row = int(
        np.floor((image.shape[0] - flags['size_input_patch'][0]) /
                 float(stride[0])) + 1)
    ntimes_col = int(
        np.floor((image.shape[1] - flags['size_input_patch'][1]) /
                 float(stride[1])) + 1)
    rowRange = range(0, ntimes_row * stride[0], stride[0])
    colRange = range(0, ntimes_col * stride[1], stride[1])

    nPatches = len(rowRange) * len(colRange)

    return patches, patches_mask, image.shape, nPatches
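
The closing arithmetic just counts sliding-window positions: along each axis there are floor((dim - patch) / stride) + 1 placements, and nPatches is the product of the two counts. A standalone check with illustrative numbers:

import numpy as np

def grid_positions(dim, patch, stride):
    # number of window placements along one axis
    return int(np.floor((dim - patch) / float(stride)) + 1)

# e.g. a 1000 x 800 padded image, 224 x 224 patches, stride 112
n_row = grid_positions(1000, 224, 112)  # 7
n_col = grid_positions(800, 224, 112)   # 6
print(n_row * n_col)                    # 42 patches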
Code Example #4
def read_data_test(filename, flags):
    """
    read_data_test reads in the test image, resizes it
    from 10x to 2.5x, and extracts patches from the image

    param: filename
    param: stride_test
    return: patches, patches_mask, image.shape, nPatches
    """

    stride = flags['stride_test']

    image = KSimage.imread(filename)
    image = KSimage.imresize(image, 0.25)  #Resizing from 10x to 2.5x
    basename = os.path.basename(filename)
    dcis_mask_file = os.path.join('experiment_epi_stromal_segmentation',
                                  'perm1', 'result', basename)
    if os.path.exists(dcis_mask_file):
        dcis_mask = KSimage.imread(dcis_mask_file)
    else:
        dcis_mask = np.ones(shape=(image.shape[0], image.shape[1])) * 255.0
        dcis_mask = dcis_mask.astype(np.uint8)

    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)

    if dcis_mask.ndim == 2:
        dcis_mask = np.expand_dims(dcis_mask, axis=2)

    padrow = flags['size_input_patch'][0]
    padcol = flags['size_input_patch'][1]

    image = np.lib.pad(image, ((padrow, padrow), (padcol, padcol), (0, 0)),
                       'symmetric')
    dcis_mask = np.lib.pad(dcis_mask,
                           ((padrow, padrow), (padcol, padcol), (0, 0)),
                           'symmetric')

    print("INPUT SHAPE: " + str(image.shape))

    # extract patches
    patches = ExtractPatches_test(flags['size_input_patch'], stride, image)
    patches_mask = ExtractPatches_test(flags['size_input_patch'], stride,
                                       dcis_mask)

    ntimes_row = int(
        np.floor((image.shape[0] - flags['size_input_patch'][0]) /
                 float(stride[0])) + 1)
    ntimes_col = int(
        np.floor((image.shape[1] - flags['size_input_patch'][1]) /
                 float(stride[1])) + 1)
    rowRange = range(0, ntimes_row * stride[0], stride[0])
    colRange = range(0, ntimes_col * stride[1], stride[1])

    nPatches = len(rowRange) * len(colRange)

    return patches, patches_mask, image.shape, nPatches
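
ExtractPatches_test itself is project code that is not shown in these excerpts. Consistent with the grid arithmetic above, a rough sketch of what such an extractor could look like, as a generator yielding patches row-major over the same rowRange/colRange grid (the name and exact behaviour here are assumptions, not the project's implementation):

import numpy as np

def extract_patches_test_sketch(size_input_patch, stride, image):
    # hypothetical stand-in for ExtractPatches_test: walks the same grid
    # that the caller's nPatches computation describes
    n_row = int(np.floor((image.shape[0] - size_input_patch[0]) / float(stride[0])) + 1)
    n_col = int(np.floor((image.shape[1] - size_input_patch[1]) / float(stride[1])) + 1)
    for r in range(0, n_row * stride[0], stride[0]):
        for c in range(0, n_col * stride[1], stride[1]):
            yield image[r:r + size_input_patch[0], c:c + size_input_patch[1], :]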
Code Example #5
def evaluate(object_folder):
    test_images_list = os.path.join(object_folder, 'test_images_list.csv')
    test_labels_list = os.path.join(object_folder, 'test_labels_list.csv')

    test_image_filenames = KScsv.read_csv(test_images_list)
    test_label_filenames = KScsv.read_csv(test_labels_list)

    all_prediction = list()
    all_label = list()
    f1score_per_image = list()
    all_score = list()
    for i_image, (image_file, label_file) in enumerate(
            zip(test_image_filenames, test_label_filenames)):

        tick = time.time()

        basename = os.path.basename(image_file[0])
        basename = os.path.splitext(basename)[0]
        image_file = os.path.join(object_folder, 'result', basename + '.mat')

        # Read in result and label
        mat_content = matlab.load(image_file)
        score = mat_content['mask']
        prediction = score > 0.5
        prediction = prediction.astype('float')

        label = KSimage.imread(label_file[0])
        label = label.astype('float')
        label = label / 255.0
        label = label > 0.5
        label = label.astype('float')

        prediction = np.reshape(prediction, -1)
        label = np.reshape(label, -1)
        score = np.reshape(score, -1)

        all_prediction.append(prediction)
        all_label.append(label)
        all_score.append(score)

        f1score = metrics.f1_score(label, prediction, average='binary')
        f1score_per_image.append(f1score)

        duration = time.time() - tick
        print('evaluate %d / %d (%.2f sec)' %
              (i_image + 1, len(test_image_filenames), duration))

    # concatenate (rather than reshape a stacked array) so images of
    # different sizes pool correctly into one 1-D vector
    all_label = np.concatenate(all_label)
    all_prediction = np.concatenate(all_prediction)
    all_score = np.concatenate(all_score)

    total_f1score = metrics.f1_score(all_label,
                                     all_prediction,
                                     average='binary')
    avg_f1score = np.mean(f1score_per_image)
    average_precision = metrics.average_precision_score(all_label,
                                                        all_score,
                                                        average='micro')

    return total_f1score, avg_f1score, average_precision, f1score_per_image
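
Note that total_f1score pools every pixel before scoring, while avg_f1score averages the per-image scores; the two can differ substantially when image sizes or class balance vary. A toy illustration with sklearn:

import numpy as np
from sklearn import metrics

# two "images": a tiny one predicted perfectly, a larger one predicted half right
y_true = [np.ones(2, dtype=int), np.array([1] * 8 + [0] * 92)]
y_pred = [np.ones(2, dtype=int), np.array([0] * 4 + [1] * 8 + [0] * 88)]

per_image = [metrics.f1_score(t, p, average='binary') for t, p in zip(y_true, y_pred)]
pooled = metrics.f1_score(np.concatenate(y_true), np.concatenate(y_pred), average='binary')
print(np.mean(per_image), pooled)  # 0.75 (per-image average) vs 0.6 (pixel-pooled)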
Code Example #6
def read_data_test(filename, flags):
    stride = flags['stride_test']

    image = KSimage.imread(filename)
    ori_dim = image.shape
    image = KSimage.imresize(image, 0.25)  # downsample by a factor of 4 (e.g. 10x -> 2.5x)

    if image.ndim == 2:
        image = np.expand_dims(image, axis=2)

    padrow = flags['size_input_patch'][0]
    padcol = flags['size_input_patch'][1]

    image = np.lib.pad(image, ((padrow, padrow), (padcol, padcol), (0, 0)),
                       'symmetric')

    # extract patches
    patches = ExtractPatches_test(flags['size_input_patch'], stride, image)

    ntimes_row = int(
        np.floor((image.shape[0] - flags['size_input_patch'][0]) /
                 float(stride[0])) + 1)
    ntimes_col = int(
        np.floor((image.shape[1] - flags['size_input_patch'][1]) /
                 float(stride[1])) + 1)
    rowRange = range(0, ntimes_row * stride[0], stride[0])
    colRange = range(0, ntimes_col * stride[1], stride[1])

    nPatches = len(rowRange) * len(colRange)

    return patches, image.shape, nPatches, ori_dim
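
This variant also returns ori_dim, the shape before downsampling, so a caller can stitch the patch predictions back together and then restore the original resolution. A hedged sketch of the stitching step, assuming row-major patch order and averaging overlapping windows (all names here are hypothetical):

import numpy as np

def stitch_patches_sketch(pred_patches, padded_shape, size_patch, stride):
    # hypothetical inverse of the extraction grid: accumulate 2-D patch
    # predictions row-major and average wherever neighbouring windows overlap
    acc = np.zeros(padded_shape[:2], dtype=np.float32)
    cnt = np.zeros(padded_shape[:2], dtype=np.float32)
    n_row = int(np.floor((padded_shape[0] - size_patch[0]) / float(stride[0])) + 1)
    n_col = int(np.floor((padded_shape[1] - size_patch[1]) / float(stride[1])) + 1)
    k = 0
    for r in range(0, n_row * stride[0], stride[0]):
        for c in range(0, n_col * stride[1], stride[1]):
            acc[r:r + size_patch[0], c:c + size_patch[1]] += pred_patches[k]
            cnt[r:r + size_patch[0], c:c + size_patch[1]] += 1.0
            k += 1
    return acc / np.maximum(cnt, 1.0)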
Code Example #7
def calculate_mean_variance_image(object_folder, flags):
    key_values = list(flags['dict_path'].keys())
    key_values.remove('group')

    # Setup
    network_stats_file_path = os.path.join(object_folder, 'checkpoint',
                                           'network_stats.mat')
    routine.create_dir(os.path.join(object_folder, 'checkpoint'))
    mean_dict = {}

    for key in key_values:
        image_folder = os.path.join(object_folder, 'train', key)
        list_images = glob.glob(
            os.path.join(image_folder, '*' + flags['dict_ext'][key]))

        image = KSimage.imread(list_images[0])
        #if np.random.randint(2, size=1) == 1:
        #    image = np.flipud(image)
        #if np.random.randint(2, size=1) == 1:
        #    image = np.fliplr(image)
        image = np.float32(image)

        mean_image = image
        variance_image = np.zeros(shape=image.shape, dtype=np.float32)

        for t, image_file in enumerate(list_images[1:]):
            image = KSimage.imread(image_file)

            # image = np.dstack((image[:, :, 2], image[:, :, 1], image[:, :, 0]))

            #if np.random.randint(2, size=1) == 1:
            #    image = np.flipud(image)
            #if np.random.randint(2, size=1) == 1:
            #    image = np.fliplr(image)
            image = np.float32(image)

            # incremental updates; the finished statistics are stored after the loop
            mean_image = (np.float32(t + 1) * mean_image +
                          image) / np.float32(t + 2)

            variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                             + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)

            print('calculate mean and variance: processing %d out of %d' %
                  (t + 2, len(list_images)))

        mean_dict[key + '_mean'] = mean_image
        mean_dict[key + '_var'] = variance_image

    matlab.save(network_stats_file_path, mean_dict)
Code Example #8
def calculate_mean_variance_image(list_images):
    """
    calculate_mean_variance_image simply the mean
    and variance for a list of images at each pixel
    position

    param: list_images
    return: mean_image, variance_image
    """

    image = KSimage.imread(list_images[0])

    if np.random.randint(2, size=1) == 1:
        image = np.flipud(image)
    if np.random.randint(2, size=1) == 1:
        image = np.fliplr(image)
    image = np.float32(image)

    mean_image = image
    variance_image = np.zeros(shape=image.shape, dtype=np.float32)

    for t, image_file in enumerate(list_images[1:]):
        image = KSimage.imread(image_file)

        if np.random.randint(2, size=1) == 1:
            image = np.flipud(image)
        if np.random.randint(2, size=1) == 1:
            image = np.fliplr(image)
        image = np.float32(image)

        mean_image = (np.float32(t + 1) * mean_image + image) / np.float32(t + 2)

        variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                         + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)

        print('calculate mean and variance: processing %d out of %d' %
              (t + 2, len(list_images)))
    return mean_image, variance_image
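
Written out, the updates in both copies of this function implement the standard incremental (Welford-style) recurrences for the per-pixel mean and population variance. With mu_n and sigma_n^2 denoting the statistics after n images (n = t + 2 in the code):

    \mu_n = \frac{(n-1)\,\mu_{n-1} + x_n}{n},
    \qquad
    \sigma_n^2 = \frac{n-1}{n}\,\sigma_{n-1}^2 + \frac{(x_n - \mu_n)^2}{n-1},
    \qquad n \ge 2,\ \sigma_1^2 = 0

which agrees exactly (in exact arithmetic) with the batch mean and ddof = 0 variance of the first n images.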
Code Example #9
File: routine.py  Project: avellal14/BBD_Pipeline
def gen_train_val_data(nth_fold, flags):
    """
    gen_train_val_data generates training and validation data for training the network. It builds
    directories for train and test and extract patches according to the provided 'method', and it
    maintains a log file containing the contents of all the data splits

    param: nth_fold
    param method: sliding_window
    return: void
    """

    ########## check whether 'cv' or 'perm' exists and which one to use ##########
    list_dir = os.listdir(os.path.join(flags['experiment_folder']))
    if ('cv' + str(nth_fold) in list_dir) and ('perm' + str(nth_fold)
                                               in list_dir):
        raise ValueError('Dangerous! You have both cv and perm on the path.')
    elif 'cv' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'cv' + str(nth_fold))
    elif 'perm' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'perm' + str(nth_fold))
    else:
        raise ValueError('No cv or perm folder!')

    ########## create train and val paths ##########
    path_dict = dict()
    path_dict['train_folder'] = os.path.join(object_folder, 'train')
    path_dict['val_folder'] = os.path.join(object_folder, 'val')
    create_dir(path_dict['train_folder'])
    create_dir(path_dict['val_folder'])

    print("Gets to the beginning of an if statement")
    ########## extract patches and put in a designated directory ##########
    if flags['gen_train_val_method'] == 'sliding_window':

        key_list = ['image', 'groundtruth', 'weight']

        for key in key_list:
            path_dict['train_' + key + '_folder'] = os.path.join(
                path_dict['train_folder'], key)
            create_dir(path_dict['train_' + key + '_folder'])
            path_dict['val_' + key + '_folder'] = os.path.join(
                path_dict['val_folder'], key)
            create_dir(path_dict['val_' + key + '_folder'])

        list_dict = dict()
        for key in key_list:
            list_dict['train_' + key + '_list'] = KScsv.read_csv(
                os.path.join(object_folder, 'train_' + key + '_list.csv'))
            list_dict['val_' + key + '_list'] = KScsv.read_csv(
                os.path.join(object_folder, 'val_' + key + '_list.csv'))

        ########## train ##########
        for key in ['train', 'val']:
            if not os.path.isfile(
                    os.path.join(path_dict[key + '_folder'],
                                 key + '_log.csv')):
                log_data = list()

                for i_image in range(len(list_dict[key + '_image_list'])):

                    tic = time.time()

                    path_image = list_dict[key + '_image_list'][i_image][0]
                    path_groundtruth = list_dict[
                        key + '_groundtruth_list'][i_image][0]
                    path_weight = list_dict[key + '_weight_list'][i_image][0]

                    #Resize image, groundtruth, and weight from 10x input size to 2.5x (level at which network operates)
                    image = KSimage.imread(path_image)
                    image = KSimage.imresize(image, 0.25)

                    groundtruth = KSimage.imread(path_groundtruth)
                    groundtruth = KSimage.imresize(groundtruth, 0.25)

                    weight = KSimage.imread(path_weight)
                    weight = KSimage.imresize(weight, 0.25)

                    # make sure that groundtruth images have depth = 1
                    if len(groundtruth.shape) > 2 and groundtruth.shape[2] > 1:
                        groundtruth = groundtruth[:, :, 1]

                    # merge intra-stromal epithelium labels (3) into stroma (2)
                    groundtruth[groundtruth == 3] = 2
                    # fat was originally label 4; relabel it to 3
                    groundtruth[groundtruth == 4] = 3

                    dict_obj = {
                        'image': image,
                        'groundtruth': groundtruth,
                        'weight': weight
                    }

                    extractor = extract_patches.sliding_window(
                        dict_obj, flags['size_input_patch'],
                        flags['size_output_patch'], flags['stride'])

                    for j, (out_obj_dict, coord_dict) in enumerate(extractor):
                        images = out_obj_dict['image']
                        groundtruths = out_obj_dict['groundtruth']
                        weights = out_obj_dict['weight']
                        coord_images = coord_dict['image']

                        #############################################################

                        basename = os.path.basename(path_image)
                        basename = os.path.splitext(basename)[0]

                        image_name = os.path.join(
                            path_dict[key + '_image_folder'], basename +
                            '_idx' + str(j) + '_row' + str(coord_images[0]) +
                            '_col' + str(coord_images[1]) + flags['image_ext'])
                        label_name = os.path.join(
                            path_dict[key + '_groundtruth_folder'],
                            basename + '_idx' + str(j) + '_row' +
                            str(coord_images[0]) + '_col' +
                            str(coord_images[1]) + flags['groundtruth_ext'])
                        weight_name = os.path.join(
                            path_dict[key + '_weight_folder'],
                            basename + '_idx' + str(j) + '_row' +
                            str(coord_images[0]) + '_col' +
                            str(coord_images[1]) + flags['weight_ext'])

                        if not os.path.isfile(image_name):
                            KSimage.imwrite(images, image_name)

                        if not os.path.isfile(label_name):
                            KSimage.imwrite(groundtruths, label_name)

                        if not os.path.isfile(weight_name):
                            KSimage.imwrite(weights, weight_name)

                        log_data.append((image_name, label_name, weight_name))

                    print('finish processing %d image from %d images : %.2f' %
                          (i_image + 1, len(list_dict[key + '_image_list']),
                           time.time() - tic))

                KScsv.write_csv(
                    log_data,
                    os.path.join(path_dict[key + '_folder'], key + '_log.csv'))

    ####################################################################################################################
    else:
        print(
            "ONLY SLIDING WINDOW TRAINING IS SUPPORTED!!!! Training terminated."
        )
        return
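
Everything gen_train_val_data needs comes in through the flags dict; the keys read in this snippet are experiment_folder, gen_train_val_method, size_input_patch, size_output_patch, stride, and the three extension entries. A hypothetical minimal configuration (values are illustrative, not the project's defaults):

flags = {
    'experiment_folder': 'experiment_epi_stromal_segmentation',  # must contain cv1/ or perm1/
    'gen_train_val_method': 'sliding_window',  # the only supported method
    'size_input_patch': [256, 256, 3],
    'size_output_patch': [256, 256, 1],
    'stride': [128, 128],
    'image_ext': '.png',
    'groundtruth_ext': '.png',
    'weight_ext': '.png',
}
gen_train_val_data(nth_fold=1, flags=flags)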
Code Example #10
he_dcis_segmentation_result_path = 'Result_tumour'

dict_path = {'he': he_dir}
dict_ext = {'he': '.tiff'}
gpu_list = ['0']

#######################################################################
# generate mask
mask_path = 'Mask'
routine.create_dir(mask_path)

files = glob.glob(os.path.join(dict_path['he'], '*' + dict_ext['he']))

for file in files:
    basename = os.path.basename(file)
    basename = os.path.splitext(basename)[0]
    savename = os.path.join(mask_path, basename + '.png')

    I = KSimage.imread(file)
    # all-255 mask: treat the entire slide as foreground
    mask = 255 * np.ones(shape=(I.shape[0], I.shape[1]), dtype=np.uint8)
    KSimage.imwrite(mask, savename)

#######################################################################
# he cell segmentation
Modules.he_cell_segmentation(he_dir, dict_ext, mask_path,
                             he_cell_segmentation_result_path, gpu_list)

# he tumour_seg
Modules.he_dcis_segmentation(he_dir, dict_ext,
                             he_dcis_segmentation_result_path, gpu_list)
Code Example #11
    # Perform Color Deconvolution of Source Image to get stain concentration matrix
    C, M_source = deconvolve(source, M_source, I0_source)

    # Vectorize to Nx3 matrix
    C = C.reshape(-1, 3)

    # Find the 99th percentile of stain concentration (for each channel)
    max_C_source = np.percentile(a=C, q=99, axis=0)

    # main normalisation
    C = C / max_C_source
    C = C * max_C_target

    # Reconstruct the RGB image
    norm = I0_source * np.exp(np.matmul(C, -M_target)) - 1
    norm = norm.reshape(h, w, 3)
    norm = norm.clip(0, I0_source).astype(source.dtype)

    return norm
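
deconvolve is project code not shown in this excerpt. Under the Beer-Lambert model it would convert intensities to optical density and solve for the stain concentrations against the stain matrix; a rough sketch consistent with the reconstruction line above (the +1 offset and the matrix orientation are assumptions inferred from that line, not the project's implementation):

import numpy as np

def deconvolve_sketch(I, M, I0):
    # hypothetical Beer-Lambert deconvolution, the inverse of
    # norm = I0 * exp(C @ -M) - 1 used above:
    # OD = -log((I + 1) / I0), then C = OD @ pinv(M)
    OD = -np.log((I.reshape(-1, 3).astype(np.float64) + 1.0) / I0)
    C = OD @ np.linalg.pinv(M)  # per-pixel stain concentrations
    return C.reshape(I.shape), M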


#####################################################################
target = KSimage.imread('1_421_1_2_7_999_11.jpg')
source = KSimage.imread('b001.tif')

norm = stain_normalisation_macenko(source, target)

KSimage.imwrite(norm, 'result.tiff')

print('done')