def setUp(self):

        # define parameters
        args = {
            'reference_mu_lab': [8.63234435, -0.11501964, 0.03868433],
            'reference_std_lab': [0.57506023, 0.10403329, 0.01364062],
            'min_radius': 12,
            'max_radius': 30,
            'foreground_threshold': 60,
            'min_nucleus_area': 80,
            'local_max_search_radius': 10,
        }

        args = collections.namedtuple('Parameters', args.keys())(**args)

        # read input image
        input_image_file = os.path.join(TEST_DATA_DIR, 'Easy1.png')

        im_input = skimage.io.imread(input_image_file)[:, :, :3]

        # perform color normalization
        im_input_nmzd = htk_cnorm.reinhard(im_input, args.reference_mu_lab,
                                           args.reference_std_lab)

        # perform color deconvolution
        w = htk_cdeconv.rgb_separate_stains_macenko_pca(
            im_input_nmzd, im_input_nmzd.max())

        im_stains = htk_cdeconv.color_deconvolution(im_input_nmzd, w).Stains

        nuclei_channel = htk_cdeconv.find_stain_index(
            htk_cdeconv.stain_color_map['hematoxylin'], w)

        im_nuclei_stain = im_stains[:, :, nuclei_channel].astype(np.float)

        cytoplasm_channel = htk_cdeconv.find_stain_index(
            htk_cdeconv.stain_color_map['eosin'], w)

        im_cytoplasm_stain = im_stains[:, :,
                                       cytoplasm_channel].astype(np.float)

        # segment nuclei
        im_nuclei_seg_mask = cli_utils.detect_nuclei_kofahi(
            im_nuclei_stain, args)

        # perform connected component analysis
        nuclei_rprops = skimage.measure.regionprops(im_nuclei_seg_mask)

        # compute nuclei features
        fdata_nuclei = htk_features.compute_nuclei_features(
            im_nuclei_seg_mask,
            im_nuclei_stain,
            im_cytoplasm=im_cytoplasm_stain)

        self.im_input = im_input
        self.im_input_nmzd = im_input_nmzd
        self.im_nuclei_stain = im_nuclei_stain
        self.im_nuclei_seg_mask = im_nuclei_seg_mask
        self.nuclei_rprops = nuclei_rprops
        self.fdata_nuclei = fdata_nuclei
def main(args):

    # Read Input Image
    print('>> Reading input image')

    print(args.inputImageFile)

    ts = large_image.getTileSource(args.inputImageFile)

    im_input = ts.getRegion(format=large_image.tilesource.TILE_FORMAT_NUMPY,
                            **utils.get_region_dict(args.region,
                                                    args.maxRegionSize, ts))[0]

    # Create stain matrix
    print('>> Creating stain matrix')

    w = utils.get_stain_matrix(args)
    print(w)

    # Perform color deconvolution
    print('>> Performing color deconvolution')
    im_stains = htk_cd.color_deconvolution(im_input, w).Stains

    # write stain images to output
    print('>> Outputting individual stain images')

    print(args.outputStainImageFile_1)
    skimage.io.imsave(args.outputStainImageFile_1, im_stains[:, :, 0])

    print(args.outputStainImageFile_2)
    skimage.io.imsave(args.outputStainImageFile_2, im_stains[:, :, 1])

    print(args.outputStainImageFile_3)
    skimage.io.imsave(args.outputStainImageFile_3, im_stains[:, :, 2])
def main(args):

    # Read Input Image
    print('>> Reading input image')

    print(args.inputImageFile)

    im_input = skimage.io.imread(args.inputImageFile)

    # Create stain matrix
    print('>> Creating stain matrix')

    w = np.array([args.stainColor_1, args.stainColor_2, args.stainColor_3]).T
    print(w)

    # Perform color deconvolution
    print('>> Performing color deconvolution')
    im_stains = htk_cd.color_deconvolution(im_input, w).Stains

    # write stain images to output
    print('>> Outputting individual stain images')

    print(args.outputStainImageFile_1)
    skimage.io.imsave(args.outputStainImageFile_1, im_stains[:, :, 0])

    print(args.outputStainImageFile_2)
    skimage.io.imsave(args.outputStainImageFile_2, im_stains[:, :, 1])

    print(args.outputStainImageFile_3)
    skimage.io.imsave(args.outputStainImageFile_3, im_stains[:, :, 2])
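# A minimal, hypothetical driver for the main() above (not part of the
# original example). The stain vectors are taken from the hematoxylin/eosin/
# null entries of the stain color map used elsewhere in these examples;
# the file names are placeholder assumptions.
import argparse

example_args = argparse.Namespace(
    inputImageFile='Easy1.png',
    stainColor_1=[0.65, 0.70, 0.29],   # hematoxylin
    stainColor_2=[0.07, 0.99, 0.11],   # eosin
    stainColor_3=[0.0, 0.0, 0.0],      # null (complement computed internally)
    outputStainImageFile_1='stain_1.png',
    outputStainImageFile_2='stain_2.png',
    outputStainImageFile_3='stain_3.png',
)

main(example_args)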
def detect_tile_nuclei(slide_path, tile_position, args, **it_kwargs):

    # get slide tile source
    ts = large_image.getTileSource(slide_path)

    # get requested tile
    tile_info = ts.getSingleTile(
        tile_position=tile_position,
        format=large_image.tilesource.TILE_FORMAT_NUMPY,
        **it_kwargs)

    # get tile image
    im_tile = tile_info['tile'][:, :, :3]

    # perform color normalization
    im_nmzd = htk_cnorm.reinhard(im_tile, args.reference_mu_lab,
                                 args.reference_std_lab)

    # perform color deconvolution
    w = cli_utils.get_stain_matrix(args)

    im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

    im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

    # segment nuclei
    im_nuclei_seg_mask = cli_utils.detect_nuclei_kofahi(im_nuclei_stain, args)

    # generate nuclei annotations
    nuclei_annot_list = cli_utils.create_tile_nuclei_annotations(
        im_nuclei_seg_mask, tile_info, args.nuclei_annotation_format)

    return nuclei_annot_list
def main(args):

    # Read Input Image
    print('>> Reading input image')

    print(args.inputImageFile)

    im_input = skimage.io.imread(args.inputImageFile)[:, :, :3]

    # Create stain matrix
    print('>> Creating stain matrix')

    w_init = np.array([args.stainColor_1, args.stainColor_2]).T

    print(w_init)

    # Perform color deconvolution
    print('>> Performing color deconvolution')

    res = htk_cdeconv.sparse_color_deconvolution(im_input, w_init, args.beta)
    w_est = np.concatenate((res.Wc, np.zeros((3, 1))), 1)
    res = htk_cdeconv.color_deconvolution(im_input, w_est)

    # write stain images to output
    print('>> Outputting individual stain images')

    print(args.outputStainImageFile_1)
    skimage.io.imsave(args.outputStainImageFile_1, res.Stains[:, :, 0])

    print(args.outputStainImageFile_2)
    skimage.io.imsave(args.outputStainImageFile_2, res.Stains[:, :, 1])

    print(args.outputStainImageFile_3)
    skimage.io.imsave(args.outputStainImageFile_3, res.Stains[:, :, 2])
def detect_nuclei(im_tile, tile_info=None, args=None,
                  src_mu_lab=None, src_sigma_lab=None):
    args = args or default_args

    # perform color normalization
    im_nmzd = htk_cnorm.reinhard(im_tile,
                                 args.reference_mu_lab,
                                 args.reference_std_lab,
                                 src_mu=src_mu_lab,
                                 src_sigma=src_sigma_lab)

    # perform color deconvolution
    w = cli_utils.get_stain_matrix(args)

    im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

    im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

    # segment nuclei
    im_nuclei_seg_mask = cli_utils.detect_nuclei_kofahi(im_nuclei_stain, args)

    # generate nuclei annotations
    nuclei_annot_list = cli_utils.create_tile_nuclei_annotations(
        im_nuclei_seg_mask, tile_info=tile_info,
        format=args.nuclei_annotation_format)

    return nuclei_annot_list
def detect_nuclei_kofahi(im_input, args):

    # perform color normalization
    im_nmzd = htk_cnorm.reinhard(im_input, args.reference_mu_lab,
                                 args.reference_std_lab)

    # perform color deconvolution
    w = np.array([
        stain_color_map[args.stain_1], stain_color_map[args.stain_2],
        stain_color_map[args.stain_3]
    ]).T

    im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

    im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

    # segment foreground (assumes nuclei are darker on a bright background)
    im_nuclei_fgnd_mask = sp.ndimage.morphology.binary_fill_holes(
        im_nuclei_stain < args.foreground_threshold)

    # run adaptive multi-scale LoG filter
    im_log = htk_shape_filters.clog(im_nuclei_stain,
                                    im_nuclei_fgnd_mask,
                                    sigma_min=args.min_radius * np.sqrt(2),
                                    sigma_max=args.max_radius * np.sqrt(2))

    # apply local maximum clustering
    im_nuclei_seg_mask, seeds, maxima = htk_seg.nuclear.max_clustering(
        im_log, im_nuclei_fgnd_mask, args.local_max_search_radius)

    # filter out small objects
    im_nuclei_seg_mask = htk_seg.label.area_open(
        im_nuclei_seg_mask, args.min_nucleus_area).astype(np.int)

    return im_nuclei_seg_mask
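# A minimal usage sketch for detect_nuclei_kofahi() above (not part of the
# original example). The parameter names mirror the namedtuple built in
# setUp(); the image path is a placeholder assumption.
import collections

import numpy as np
import skimage.io

example_args = {
    'reference_mu_lab': [8.63234435, -0.11501964, 0.03868433],
    'reference_std_lab': [0.57506023, 0.10403329, 0.01364062],
    'stain_1': 'hematoxylin',
    'stain_2': 'eosin',
    'stain_3': 'null',
    'foreground_threshold': 60,
    'min_radius': 12,
    'max_radius': 30,
    'min_nucleus_area': 80,
    'local_max_search_radius': 10,
}
example_args = collections.namedtuple(
    'Parameters', example_args.keys())(**example_args)

im = skimage.io.imread('Easy1.png')[:, :, :3]
im_label_mask = detect_nuclei_kofahi(im, example_args)
print('labeled nuclei:', len(np.unique(im_label_mask)) - 1)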
def compute_tile_nuclei_features(slide_path, tile_position, args, it_kwargs,
                                 src_mu_lab=None, src_sigma_lab=None):

    # get slide tile source
    ts = large_image.getTileSource(slide_path)

    # get requested tile
    tile_info = ts.getSingleTile(
        tile_position=tile_position,
        format=large_image.tilesource.TILE_FORMAT_NUMPY,
        **it_kwargs)

    # get tile image
    im_tile = tile_info['tile'][:, :, :3]

    # perform color normalization
    im_nmzd = htk_cnorm.reinhard(im_tile,
                                 args.reference_mu_lab,
                                 args.reference_std_lab,
                                 src_mu=src_mu_lab,
                                 src_sigma=src_sigma_lab)

    # perform color deconvolution
    w = cli_utils.get_stain_matrix(args)

    im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

    im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

    # segment nuclei
    im_nuclei_seg_mask = cli_utils.detect_nuclei_kofahi(im_nuclei_stain, args)

    # generate nuclei annotations
    nuclei_annot_list = cli_utils.create_tile_nuclei_annotations(
        im_nuclei_seg_mask, tile_info, args.nuclei_annotation_format)

    # compute nuclei features
    if args.cytoplasm_features:
        im_cytoplasm_stain = im_stains[:, :, 1].astype(np.float)
    else:
        im_cytoplasm_stain = None

    fdata = htk_features.compute_nuclei_features(
        im_nuclei_seg_mask, im_nuclei_stain, im_cytoplasm_stain,
        fsd_bnd_pts=args.fsd_bnd_pts,
        fsd_freq_bins=args.fsd_freq_bins,
        cyto_width=args.cyto_width,
        num_glcm_levels=args.num_glcm_levels,
        morphometry_features_flag=args.morphometry_features,
        fsd_features_flag=args.fsd_features,
        intensity_features_flag=args.intensity_features,
        gradient_features_flag=args.gradient_features,
    )

    fdata.columns = ['Feature.' + col for col in fdata.columns]

    return nuclei_annot_list, fdata
def detect_tile_nuclei(slide_path, tile_position, args, it_kwargs,
                       src_mu_lab=None, src_sigma_lab=None):

    # get slide tile source
    ts = large_image.getTileSource(slide_path)

    # get requested tile
    tile_info = ts.getSingleTile(
        tile_position=tile_position,
        format=large_image.tilesource.TILE_FORMAT_NUMPY,
        **it_kwargs)

    # get tile image
    im_tile = tile_info['tile'][:, :, :3]

    # perform color normalization
    im_nmzd = htk_cnorm.reinhard(im_tile,
                                 args.reference_mu_lab,
                                 args.reference_std_lab,
                                 src_mu=src_mu_lab,
                                 src_sigma=src_sigma_lab)

    # perform color deconvolution
    w = cli_utils.get_stain_matrix(args)

    im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

    im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

    # segment nuclear foreground
    im_nuclei_fgnd_mask = im_nuclei_stain < args.foreground_threshold

    # segment nuclei
    im_nuclei_seg_mask = htk_nuclear.detect_nuclei_kofahi(
        im_nuclei_stain,
        im_nuclei_fgnd_mask,
        args.min_radius,
        args.max_radius,
        args.min_nucleus_area,
        args.local_max_search_radius
    )

    # Delete border nuclei
    if args.ignore_border_nuclei:

        im_nuclei_seg_mask = htk_seg_label.delete_border(im_nuclei_seg_mask)

    # generate nuclei annotations
    nuclei_annot_list = []

    flag_nuclei_found = np.any(im_nuclei_seg_mask)

    if flag_nuclei_found:
        nuclei_annot_list = cli_utils.create_tile_nuclei_annotations(
            im_nuclei_seg_mask, tile_info, args.nuclei_annotation_format)

    return nuclei_annot_list
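# A minimal usage sketch for detect_tile_nuclei() above (not part of the
# original example). The slide path is a placeholder; parameter values mirror
# those used in the tests in these examples, and stain vectors of [-1, -1, -1]
# mean "look the stain up by name".
import collections

example_args = {
    'reference_mu_lab': [8.63234435, -0.11501964, 0.03868433],
    'reference_std_lab': [0.57506023, 0.10403329, 0.01364062],
    'stain_1': 'hematoxylin',
    'stain_2': 'eosin',
    'stain_3': 'null',
    'stain_1_vector': [-1, -1, -1],
    'stain_2_vector': [-1, -1, -1],
    'stain_3_vector': [-1, -1, -1],
    'foreground_threshold': 60,
    'min_radius': 6,
    'max_radius': 12,
    'min_nucleus_area': 25,
    'local_max_search_radius': 8,
    'ignore_border_nuclei': False,
    'nuclei_annotation_format': 'boundary',
}
example_args = collections.namedtuple(
    'Parameters', example_args.keys())(**example_args)

it_kwargs = {
    'tile_size': {'width': 1024},
    'scale': {'magnification': 20},
}

# process the first tile of a (placeholder) whole-slide image
annotations = detect_tile_nuclei('slide.svs', 0, example_args, it_kwargs)
print('nuclei in tile 0:', len(annotations))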
    def test_roundtrip(self):
        im_path = os.path.join(TEST_DATA_DIR, 'Easy1.png')
        im = skimage.io.imread(im_path)[..., :3]

        w = np.array([[0.650, 0.072, 0], [0.704, 0.990, 0], [0.286, 0.105, 0]])

        conv_result = htk_dcv.color_deconvolution(im, w, 255)

        im_reconv = htk_dcv.color_convolution(conv_result.StainsFloat,
                                              conv_result.Wc, 255)

        np.testing.assert_allclose(im, im_reconv, atol=1)
    def test_roundtrip(self):
        im_path = utilities.externaldata('data/Easy1.png.sha512')
        im = skimage.io.imread(im_path)[..., :3]

        w = np.array([[0.650, 0.072, 0], [0.704, 0.990, 0], [0.286, 0.105, 0]])

        conv_result = htk_dcv.color_deconvolution(im, w, 255)

        im_reconv = htk_dcv.color_convolution(conv_result.StainsFloat,
                                              conv_result.Wc, 255)

        np.testing.assert_allclose(im, im_reconv, atol=1)
def detect_nuclei(im_input, min_radius=6, max_radius=10, display_result=False):

    # color normalization
    ref_mu_lab = (8.63234435, -0.11501964, 0.03868433)
    ref_std_lab = (0.57506023, 0.10403329, 0.01364062)

    im_nmzd = htk_cnorm.reinhard(im_input, ref_mu_lab, ref_std_lab)

    # color deconvolution
    w_est = htk_cdeconv.rgb_separate_stains_macenko_pca(im_nmzd, 255)
    nuclear_chid = htk_cdeconv.find_stain_index(
        htk_cdeconv.stain_color_map['hematoxylin'], w_est)
    im_nuclei_stain = htk_cdeconv.color_deconvolution(
        im_nmzd, w_est, 255).Stains[:, :, nuclear_chid]

    # segment nuclei foreground
    th = skimage.filters.threshold_li(im_nuclei_stain) * 0.8
    # th = skimage.filters.threshold_otsu(im_nuclei_stain)
    im_fgnd_mask = im_nuclei_stain < th
    im_fgnd_mask = skimage.morphology.opening(im_fgnd_mask,
                                              skimage.morphology.disk(2))
    im_fgnd_mask = skimage.morphology.closing(im_fgnd_mask,
                                              skimage.morphology.disk(1))

    # detect nuclei
    im_dog, im_dog_sigma = htk_shape_filters.cdog(
        im_nuclei_stain,
        im_fgnd_mask,
        sigma_min=min_radius / np.sqrt(2),
        sigma_max=max_radius / np.sqrt(2))

    nuclei_coord = skimage.feature.peak_local_max(
        im_dog, min_distance=min_radius // 2, threshold_rel=0.1)

    nuclei_coord = nuclei_coord[im_fgnd_mask[nuclei_coord[:, 0],
                                             nuclei_coord[:, 1]], :]

    nuclei_rad = np.array([
        im_dog_sigma[nuclei_coord[i, 0], nuclei_coord[i, 1]] * np.sqrt(2)
        for i in range(nuclei_coord.shape[0])
    ])

    # display result
    if display_result:

        print('Number of nuclei = ', nuclei_coord.shape[0])

        plt.figure(figsize=(30, 20))
        plt.subplot(2, 2, 1)
        plt.imshow(im_input)
        plt.title('Input', fontsize=labelsize)
        plt.axis('off')

        plt.subplot(2, 2, 2)
        plt.imshow(im_nuclei_stain)
        plt.title('Deconv nuclei stain', fontsize=labelsize)
        plt.axis('off')

        plt.subplot(2, 2, 3)
        plt.imshow(im_fgnd_mask)
        plt.title('Foreground mask', fontsize=labelsize)
        plt.axis('off')

        plt.subplot(2, 2, 4)
        plt.imshow(im_nmzd)
        plt.plot(nuclei_coord[:, 1], nuclei_coord[:, 0], 'k+')

        for i in range(nuclei_coord.shape[0]):

            cx = nuclei_coord[i, 1]
            cy = nuclei_coord[i, 0]
            r = nuclei_rad[i]

            mcircle = mpatches.Circle((cx, cy), r, color='g', fill=False)
            plt.gca().add_patch(mcircle)

        plt.title('Nuclei detection', fontsize=labelsize)
        plt.axis('off')

        plt.tight_layout()

    return nuclei_coord, nuclei_rad
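# A minimal usage sketch for detect_nuclei() above (not part of the original
# example). The image path is a placeholder assumption.
import skimage.io

im = skimage.io.imread('Easy1.png')[:, :, :3]
nuclei_coord, nuclei_rad = detect_nuclei(im, min_radius=6, max_radius=10)
print('Number of nuclei =', nuclei_coord.shape[0])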
    def test_segment_nuclei_kofahi(self):

        input_image_file = datastore.fetch('Easy1.png')

        ref_image_file = datastore.fetch('L1.png')

        # read input image
        im_input = skimage.io.imread(input_image_file)[:, :, :3]

        # read reference image
        im_reference = skimage.io.imread(ref_image_file)[:, :, :3]

        # get mean and stddev of reference image in lab space
        mean_ref, std_ref = htk_cvt.lab_mean_std(im_reference)

        # perform color normalization
        im_nmzd = htk_cnorm.reinhard(im_input, mean_ref, std_ref)

        # perform color deconvolution
        stain_color_map = {
            'hematoxylin': [0.65, 0.70, 0.29],
            'eosin': [0.07, 0.99, 0.11],
            'dab': [0.27, 0.57, 0.78],
            'null': [0.0, 0.0, 0.0]
        }

        w = htk_cdeconv.rgb_separate_stains_macenko_pca(im_nmzd, im_nmzd.max())

        im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

        nuclei_channel = htk_cdeconv.find_stain_index(
            stain_color_map['hematoxylin'], w)

        im_nuclei_stain = im_stains[:, :, nuclei_channel].astype(np.float)

        # segment nuclei
        im_nuclei_seg_mask = htk_seg.nuclear.detect_nuclei_kofahi(
            im_nuclei_stain,
            im_nuclei_stain < 60,
            min_radius=20,
            max_radius=30,
            min_nucleus_area=80,
            local_max_search_radius=10)

        num_nuclei = len(np.unique(im_nuclei_seg_mask)) - 1

        # check if segmentation mask matches ground truth
        gtruth_mask_file = os.path.join(
            datastore.fetch('Easy1_nuclei_seg_kofahi.npy'))

        im_gtruth_mask = np.load(gtruth_mask_file)

        num_nuclei_gtruth = len(np.unique(im_gtruth_mask)) - 1

        assert num_nuclei == num_nuclei_gtruth

        np.testing.assert_allclose(im_nuclei_seg_mask, im_gtruth_mask)

        # check no nuclei case
        im_nuclei_seg_mask = htk_seg.nuclear.detect_nuclei_kofahi(
            255 * np.ones_like(im_nuclei_stain),
            np.ones_like(im_nuclei_stain),
            min_radius=20,
            max_radius=30,
            min_nucleus_area=80,
            local_max_search_radius=10)

        num_nuclei = len(np.unique(im_nuclei_seg_mask)) - 1

        assert num_nuclei == 0
def detect_tile_nuclei(slide_path, tile_position, args, it_kwargs,
                       src_mu_lab=None, src_sigma_lab=None, debug=False):

    # =========================================================================
    # ======================= Tile Loading ====================================
    # =========================================================================
    print('\n>> Loading Tile ... \n')

    csv_dict = {}

    csv_dict['PreparationTime'] = []
    csv_dict['ColorDeconvTime'] = []
    csv_dict['TotalTileLoadingTime'] = []

    csv_dict['CKPTLoadingTime'] = []
    csv_dict['ModelInferenceTime'] = []
    csv_dict['DetectionTime'] = []

    csv_dict['ROIShape'] = []
    csv_dict['ObjectsDict'] = []
    csv_dict['NumObjects'] = []

    csv_dict['AnnotationWritingTime'] = []

    csv_dict['AnnotationDict'] = []
    csv_dict['AnalysisDict'] = []

    start_time = time.time()
    total_tileloading_start_time = time.time()

    ts = large_image.getTileSource(slide_path)
    tile_info = ts.getSingleTile(
        tile_position=tile_position,
        format=large_image.tilesource.TILE_FORMAT_NUMPY,
        **it_kwargs)
    im_tile = tile_info['tile'][:, :, :3]
    csv_dict['ROIShape'] = im_tile.shape[:2]

    prep_time = time.time() - start_time
    csv_dict['PreparationTime'] = round(prep_time, 3)

    # =========================================================================
    # =================Img Normalization & Color Deconv========================
    # =========================================================================
    print('\n>> Color Deconvolving ... \n')
    start_time = time.time()

    im_nmzd = htk_cnorm.reinhard(
        im_tile,
        REFERENCE_MU_LAB,
        REFERENCE_STD_LAB,
        src_mu=src_mu_lab,
        src_sigma=src_sigma_lab
    )

    # perform color deconvolution
    if args.deconv_method == 'ruifrok':

        w = cli_utils.get_stain_matrix(args)
        im_stains = htk_cdeconv.color_deconvolution(
            im_nmzd, w).Stains.astype(np.float)[:, :, :2]

    elif args.deconv_method == 'macenko':

        w_est = htk_cdeconv.rgb_separate_stains_macenko_pca(im_tile, 255)
        im_stains = htk_cdeconv.color_deconvolution(
            im_tile, w_est, 255).Stains.astype(np.float)
        ch1 = htk_cdeconv.find_stain_index(
            htk_cdeconv.stain_color_map[args.stain_1], w_est)
        ch2 = htk_cdeconv.find_stain_index(
            htk_cdeconv.stain_color_map[args.stain_2], w_est)
        im_stains = im_stains[:, :, [ch1, ch2]]

    else:

        raise ValueError('Invalid deconvolution method parameter.')

    # =========================================================================
    # ====================== Fuse the stain1 & stain2 pix======================
    # =========================================================================

    # compute nuclear foreground mask
    im_fgnd_mask_stain_1 = (
        im_stains[:, :, 0] < threshold_yen(im_stains[:, :, 0]))
    im_fgnd_mask_stain_2 = (
        im_stains[:, :, 1] < threshold_yen(im_stains[:, :, 1]))
    im_fgnd_seg_mask = im_fgnd_mask_stain_1 | im_fgnd_mask_stain_2

    # segment nuclei
    im_nuc_det_input = np.squeeze(np.min(im_stains[:, :, :2], axis=2))
    print('---> Fusing 2 Stains')
    deconv_time = time.time() - start_time
    csv_dict['ColorDeconvTime'] = round(deconv_time, 3)

    # =========================================================================
    # ================= Nuclei Detection Deep Learning Block ==================
    # =========================================================================

    total_tileloading_time = time.time() - total_tileloading_start_time
    csv_dict['TotalTileLoadingTime'] = round(total_tileloading_time, 3)

    start_time = time.time()

    config = get_config(CONFIG)
    config.model.rcnn.proposals.total_max_detections = args.max_det
    config.model.rcnn.proposals.min_prob_threshold = args.min_prob
    im_nuc_det_input = np.stack((im_nuc_det_input,) * 3, axis=-1)

    # =========================================================================
    tf.reset_default_graph()

    dataset_class = get_dataset('object_detection')
    model_class = get_model('fasterrcnn')
    dataset = dataset_class(config)
    model = model_class(config)

    graph = tf.Graph()
    session = tf.Session(graph=graph)

    with graph.as_default():
        image_placeholder = tf.placeholder(
            tf.float32, (None, None, 3), name='Input_Placeholder'
        )
        pred_dict = model(image_placeholder)

        ckpt_loading_start_time = time.time()

        saver = tf.train.Saver(sharded=True, allow_empty=True)
        saver.restore(session, CKPT_DIR)
        tf.logging.info('Loaded checkpoint.')

        ckpt_loading_time = time.time() - ckpt_loading_start_time
        csv_dict['CKPTLoadingTime'] = round(ckpt_loading_time, 3)

        inference_start_time = time.time()

        cls_prediction = pred_dict['classification_prediction']
        objects_tf = cls_prediction['objects']
        objects_labels_tf = cls_prediction['labels']
        objects_labels_prob_tf = cls_prediction['probs']

        fetches = {
            'objects': objects_tf,
            'labels': objects_labels_tf,
            'probs': objects_labels_prob_tf,
        }

        fetched = session.run(fetches, feed_dict={
            image_placeholder: np.array(im_nuc_det_input)
        })

        inference_time = time.time() - inference_start_time
        csv_dict['ModelInferenceTime'] = round(inference_time, 3)

        objects = fetched['objects']
        labels = fetched['labels'].tolist()
        probs = fetched['probs'].tolist()

        # Cast to int to consistently return the same type in Python 2 and 3
        objects = [
            [int(round(coord)) for coord in obj]
            for obj in objects.tolist()
        ]

        predictions = sorted([
            {
                'bbox': obj,
                'label': label,
                'prob': round(prob, 4),
            } for obj, label, prob in zip(objects, labels, probs)
        ], key=lambda x: x['prob'], reverse=True)

    print('\n>> Finishing Detection ... \n')
    print('***** Number of Detected Cells ****** : ', len(predictions))
    detection_time = time.time() - start_time
    csv_dict['DetectionTime'] = round(detection_time, 3)
    csv_dict['NumObjects'] = len(predictions)
    csv_dict['ObjectsDict'] = predictions

    # =========================================================================
    # ======================= TODO: Implement border deletion =================
    # =========================================================================

    # =========================================================================
    # ======================= Write Annotations ===============================
    # =========================================================================

    start_time = time.time()

    objects_df = pd.DataFrame(objects)
    formatted_annot_list,\
        formatter_analysis_list = cli_utils.convert_preds_to_utilformat(
            objects_df,
            probs,
            args.ignore_border_nuclei,
            im_tile_size=args.analysis_tile_size)

    nuclei_annot_list = cli_utils.create_tile_nuclei_annotations(
        formatted_annot_list, tile_info, args.nuclei_annotation_format)
    csv_dict['AnnotationDict'] = nuclei_annot_list

    csv_dict['AnalysisDict'] = formatter_analysis_list

    num_nuclei = len(nuclei_annot_list)

    anot_time = time.time() - start_time
    csv_dict['AnnotationWritingTime'] = round(anot_time, 3)

    return csv_dict
    def test_segment_nuclei_kofahi(self):

        input_image_file = os.path.join(TEST_DATA_DIR, 'Easy1.png')

        ref_image_file = os.path.join(TEST_DATA_DIR, 'L1.png')

        # read input image
        im_input = skimage.io.imread(input_image_file)[:, :, :3]

        # read reference image
        im_reference = skimage.io.imread(ref_image_file)[:, :, :3]

        # get mean and stddev of reference image in lab space
        mean_ref, std_ref = htk_cvt.lab_mean_std(im_reference)

        # perform color normalization
        im_nmzd = htk_cnorm.reinhard(im_input, mean_ref, std_ref)

        # perform color deconvolution
        stain_color_map = {
            'hematoxylin': [0.65, 0.70, 0.29],
            'eosin': [0.07, 0.99, 0.11],
            'dab': [0.27, 0.57, 0.78],
            'null': [0.0, 0.0, 0.0]
        }

        w = htk_cdeconv.rgb_separate_stains_macenko_pca(im_nmzd, im_nmzd.max())

        im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

        nuclei_channel = htk_cdeconv.find_stain_index(stain_color_map['hematoxylin'], w)

        im_nuclei_stain = im_stains[:, :, nuclei_channel].astype(np.float)

        # segment foreground (assumes nuclei are darker on a bright background)
        im_nuclei_fgnd_mask = sp.ndimage.morphology.binary_fill_holes(
            im_nuclei_stain < 60)

        # run adaptive multi-scale LoG filter
        im_log, im_sigma_max = htk_shape_filters.clog(
            im_nuclei_stain, im_nuclei_fgnd_mask,
            sigma_min=20 / np.sqrt(2), sigma_max=30 / np.sqrt(2))

        # apply local maximum clustering
        im_nuclei_seg_mask, seeds, maxima = htk_seg.nuclear.max_clustering(
            im_log, im_nuclei_fgnd_mask, 10)

        # filter out small objects
        im_nuclei_seg_mask = htk_seg.label.area_open(
            im_nuclei_seg_mask, 80).astype(np.uint8)

        # perform connected component analysis
        obj_props = skimage.measure.regionprops(im_nuclei_seg_mask)

        num_nuclei = len(obj_props)

        # check if segmentation mask matches ground truth
        gtruth_mask_file = os.path.join(TEST_DATA_DIR,
                                        'Easy1_nuclei_seg_kofahi_adaptive.npy')

        im_gtruth_mask = np.load(gtruth_mask_file)

        obj_props_gtruth = skimage.measure.regionprops(im_gtruth_mask)

        num_nuclei_gtruth = len(obj_props_gtruth)

        assert(num_nuclei == num_nuclei_gtruth)

        np.testing.assert_allclose(im_nuclei_seg_mask, im_gtruth_mask)
def main(args):

    #
    # Read Input Image
    #
    print('>> Reading input image')

    im_input = skimage.io.imread(args.inputImageFile)[:, :, :3]

    #
    # Perform color normalization
    #
    print('>> Performing color normalization')

    # compute mean and stddev of input in LAB color space
    mu, sigma = htk_ccvt.lab_mean_std(im_input)

    # perform reinhard normalization
    im_nmzd = htk_cnorm.reinhard(im_input, mu, sigma)

    #
    # Perform color deconvolution
    #
    print('>> Performing color deconvolution')

    stain_color_1 = stain_color_map[args.stain_1]
    stain_color_2 = stain_color_map[args.stain_2]
    stain_color_3 = stain_color_map[args.stain_3]

    w = np.array([stain_color_1, stain_color_2, stain_color_3]).T

    im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

    im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

    #
    # Perform nuclei segmentation
    #
    print('>> Performing nuclei segmentation')

    # segment foreground
    im_fgnd_mask = sp.ndimage.morphology.binary_fill_holes(
        im_nuclei_stain < args.foreground_threshold)

    # run adaptive multi-scale LoG filter
    im_log = htk_shape_filters.clog(im_nuclei_stain,
                                    im_fgnd_mask,
                                    sigma_min=args.min_radius * np.sqrt(2),
                                    sigma_max=args.max_radius * np.sqrt(2))

    im_nuclei_seg_mask, seeds, maxima = htk_seg.nuclear.max_clustering(
        im_log, im_fgnd_mask, args.local_max_search_radius)

    # filter out small objects
    im_nuclei_seg_mask = htk_seg.label.area_open(
        im_nuclei_seg_mask, args.min_nucleus_area).astype(np.int)

    #
    # Perform feature extraction
    #
    print('>> Performing feature extraction')

    im_nuclei = im_stains[:, :, 0]

    if args.cytoplasm_features:
        im_cytoplasm = im_stains[:, :, 1]
    else:
        im_cytoplasm = None

    df = htk_features.ComputeNucleiFeatures(
        im_nuclei_seg_mask,
        im_nuclei,
        im_cytoplasm,
        fsd_bnd_pts=args.fsd_bnd_pts,
        fsd_freq_bins=args.fsd_freq_bins,
        cyto_width=args.cyto_width,
        num_glcm_levels=args.num_glcm_levels,
        morphometry_features_flag=args.morphometry_features,
        fsd_features_flag=args.fsd_features,
        intensity_features_flag=args.intensity_features,
        gradient_features_flag=args.gradient_features,
    )

    #
    # Create HDF5 file
    #
    print('>> Writing HDF5 file')

    hdf = pd.HDFStore(args.outputFile)
    hdf.put('d1', df, format='table', data_columns=True)

    print('--- Object x Features = ', hdf['d1'].shape)
    def test_create_tile_nuclei_annotations(self):

        wsi_path = os.path.join(
            utilities.externaldata(
                'data/TCGA-06-0129-01Z-00-DX3.bae772ea-dd36-47ec-8185-761989be3cc8.svs.sha512'  # noqa
            ))

        # define parameters
        args = {
            'reference_mu_lab': [8.63234435, -0.11501964, 0.03868433],
            'reference_std_lab': [0.57506023, 0.10403329, 0.01364062],
            'stain_1': 'hematoxylin',
            'stain_2': 'eosin',
            'stain_3': 'null',
            'stain_1_vector': [-1, -1, -1],
            'stain_2_vector': [-1, -1, -1],
            'stain_3_vector': [-1, -1, -1],
            'min_fgnd_frac': 0.50,
            'analysis_mag': 20,
            'analysis_tile_size': 1200,
            'foreground_threshold': 60,
            'min_radius': 6,
            'max_radius': 12,
            'min_nucleus_area': 25,
            'local_max_search_radius': 8,

            # In Python 3 unittesting, the scheduler fails if it uses processes
            'scheduler': 'multithreading',  # None,
            'num_workers': -1,
            'num_threads_per_worker': 1,
        }

        args = collections.namedtuple('Parameters', args.keys())(**args)

        # read WSI
        ts = large_image.getTileSource(wsi_path)

        ts_metadata = ts.getMetadata()

        analysis_tile_size = {
            'width':
            int(ts_metadata['tileWidth'] * np.floor(
                1.0 * args.analysis_tile_size / ts_metadata['tileWidth'])),
            'height':
            int(ts_metadata['tileHeight'] * np.floor(
                1.0 * args.analysis_tile_size / ts_metadata['tileHeight']))
        }

        # define ROI
        roi = {
            'left': ts_metadata['sizeX'] / 2,
            'top': ts_metadata['sizeY'] * 3 / 4,
            'width': analysis_tile_size['width'],
            'height': analysis_tile_size['height'],
            'units': 'base_pixels'
        }

        # define tile iterator parameters
        it_kwargs = {
            'tile_size': {
                'width': args.analysis_tile_size
            },
            'scale': {
                'magnification': args.analysis_mag
            },
            'region': roi
        }

        # create dask client
        cli_utils.create_dask_client(args)

        # get tile foreground mask at low resolution
        im_fgnd_mask_lres, fgnd_seg_scale = \
            cli_utils.segment_wsi_foreground_at_low_res(ts)

        # compute tile foreground fraction
        tile_fgnd_frac_list = htk_utils.compute_tile_foreground_fraction(
            wsi_path, im_fgnd_mask_lres, fgnd_seg_scale, it_kwargs)

        num_fgnd_tiles = np.count_nonzero(
            tile_fgnd_frac_list >= args.min_fgnd_frac)

        np.testing.assert_equal(num_fgnd_tiles, 2)

        # create nuclei annotations
        nuclei_bbox_annot_list = []
        nuclei_bndry_annot_list = []

        for tile_info in ts.tileIterator(
                format=large_image.tilesource.TILE_FORMAT_NUMPY, **it_kwargs):

            im_tile = tile_info['tile'][:, :, :3]

            # perform color normalization
            im_nmzd = htk_cnorm.reinhard(im_tile, args.reference_mu_lab,
                                         args.reference_std_lab)

            # perform color deconvolution
            w = cli_utils.get_stain_matrix(args)

            im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

            im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

            # segment nuclei
            im_nuclei_seg_mask = htk_nuclear.detect_nuclei_kofahi(
                im_nuclei_stain, im_nuclei_stain < args.foreground_threshold,
                args.min_radius, args.max_radius, args.min_nucleus_area,
                args.local_max_search_radius)

            # generate nuclei annotations as bboxes
            cur_bbox_annot_list = cli_utils.create_tile_nuclei_annotations(
                im_nuclei_seg_mask, tile_info, 'bbox')

            nuclei_bbox_annot_list.extend(cur_bbox_annot_list)

            # generate nuclei annotations as boundaries
            cur_bndry_annot_list = cli_utils.create_tile_nuclei_annotations(
                im_nuclei_seg_mask, tile_info, 'boundary')

            nuclei_bndry_annot_list.extend(cur_bndry_annot_list)

        # compare nuclei bbox annotations with gtruth
        nuclei_bbox_annot_gtruth_file = os.path.join(
            utilities.externaldata(
                'data/TCGA-06-0129-01Z-00-DX3_roi_nuclei_bbox.anot.sha512'  # noqa
            ))

        with open(nuclei_bbox_annot_gtruth_file, 'r') as fbbox_annot:
            nuclei_bbox_annot_list_gtruth = json.load(fbbox_annot)['elements']

        # Check that nuclei_bbox_annot_list is nearly equal to
        # nuclei_bbox_annot_list_gtruth
        assert len(nuclei_bbox_annot_list) == len(
            nuclei_bbox_annot_list_gtruth)
        for pos in range(len(nuclei_bbox_annot_list)):
            np.testing.assert_array_almost_equal(
                nuclei_bbox_annot_list[pos]['center'],
                nuclei_bbox_annot_list_gtruth[pos]['center'], 0)
            np.testing.assert_almost_equal(
                nuclei_bbox_annot_list[pos]['width'],
                nuclei_bbox_annot_list_gtruth[pos]['width'], 1)
            np.testing.assert_almost_equal(
                nuclei_bbox_annot_list[pos]['height'],
                nuclei_bbox_annot_list_gtruth[pos]['height'], 1)

        # compare nuclei boundary annotations with gtruth
        nuclei_bndry_annot_gtruth_file = os.path.join(
            utilities.externaldata(
                'data/TCGA-06-0129-01Z-00-DX3_roi_nuclei_boundary.anot.sha512'  # noqa
            ))

        with open(nuclei_bndry_annot_gtruth_file, 'r') as fbndry_annot:
            nuclei_bndry_annot_list_gtruth = json.load(
                fbndry_annot)['elements']

        assert len(nuclei_bndry_annot_list) == len(
            nuclei_bndry_annot_list_gtruth)

        for pos in range(len(nuclei_bndry_annot_list)):

            np.testing.assert_array_almost_equal(
                nuclei_bndry_annot_list[pos]['points'],
                nuclei_bndry_annot_list_gtruth[pos]['points'], 0)
def main(args):

    #
    # Read Input Image
    #
    print('>> Reading input image')

    im_input = skimage.io.imread(args.inputImageFile)[:, :, :3]

    #
    # Perform color normalization
    #
    print('>> Performing color normalization')

    # compute mean and stddev of input in LAB color space
    mu, sigma = htk_ccvt.lab_mean_std(im_input)

    # perform reinhard normalization
    im_nmzd = htk_cnorm.reinhard(im_input, mu, sigma)

    #
    # Perform color deconvolution
    #
    print('>> Performing color deconvolution')

    stain_color_1 = stain_color_map[args.stain_1]
    stain_color_2 = stain_color_map[args.stain_2]
    stain_color_3 = stain_color_map[args.stain_3]

    w = np.array([stain_color_1, stain_color_2, stain_color_3]).T

    im_stains = htk_cdeconv.color_deconvolution(im_nmzd, w).Stains

    im_nuclei_stain = im_stains[:, :, 0].astype(np.float)

    #
    # Perform nuclei segmentation
    #
    print('>> Performing nuclei segmentation')

    # segment foreground
    im_fgnd_mask = sp.ndimage.morphology.binary_fill_holes(
        im_nuclei_stain < args.foreground_threshold)

    # run adaptive multi-scale LoG filter
    im_log = htk_shape_filters.clog(im_nuclei_stain, im_fgnd_mask,
                                    sigma_min=args.min_radius * np.sqrt(2),
                                    sigma_max=args.max_radius * np.sqrt(2))

    im_nuclei_seg_mask, seeds, maxima = htk_seg.nuclear.max_clustering(
        im_log, im_fgnd_mask, args.local_max_search_radius)

    # filter out small objects
    im_nuclei_seg_mask = htk_seg.label.area_open(
        im_nuclei_seg_mask, args.min_nucleus_area).astype(np.int)

    #
    # Generate annotations
    #
    obj_props = skimage.measure.regionprops(im_nuclei_seg_mask)

    print('Number of nuclei = ', len(obj_props))

    # create basic schema
    annotation = {
        "name":          "Nuclei",
        "description":   "Nuclei bounding boxes from a segmentation algorithm",
        "attributes": {
            "algorithm": {
                "color_normalization": "reinhard",
                "color_deconvolution": "ColorDeconvolution",
                "nuclei_segmentation": ["cLOG",
                                        "MaxClustering",
                                        "FilterLabel"]
            }
        },
        "elements": []
    }

    # add each nucleus as an element into the annotation schema
    for i in range(len(obj_props)):

        c = [obj_props[i].centroid[1], obj_props[i].centroid[0], 0]
        width = obj_props[i].bbox[3] - obj_props[i].bbox[1] + 1
        height = obj_props[i].bbox[2] - obj_props[i].bbox[0] + 1

        cur_bbox = {
            "type":        "rectangle",
            "center":      c,
            "width":       width,
            "height":      height,
            "rotation":    0,
            "fillColor":   "rgba(255, 255, 255, 0)",
            "lineWidth":   2,
            "lineColor":   "rgb(34, 139, 34)"
        }

        annotation["elements"].append(cur_bbox)

    #
    # Save output segmentation mask
    #
    print('>> Outputting nuclei segmentation mask')

    skimage.io.imsave(args.outputNucleiMaskFile, im_nuclei_seg_mask)

    #
    # Save output annotation
    #
    print('>> Outputting nuclei annotation')

    with open(args.outputNucleiAnnotationFile, 'w') as annotation_file:
        json.dump(annotation, annotation_file, indent=2, sort_keys=False)
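# A minimal, hypothetical driver for the main() above (not part of the
# original example). Stain names come from the stain color map used in these
# examples; the segmentation parameters mirror setUp(); the file names are
# placeholder assumptions.
import argparse

example_args = argparse.Namespace(
    inputImageFile='Easy1.png',
    stain_1='hematoxylin',
    stain_2='eosin',
    stain_3='null',
    foreground_threshold=60,
    min_radius=12,
    max_radius=30,
    min_nucleus_area=80,
    local_max_search_radius=10,
    outputNucleiMaskFile='nuclei_mask.png',
    outputNucleiAnnotationFile='nuclei_annotation.json',
)

main(example_args)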