Example #1
    tif_paths = glob.glob(patch_folder + '/*.tif')

    tif_paths = random.sample(tif_paths, 500)

    # Loop over the patch images
    for tif_path in tif_paths:
        """Run segmentation on an image."""
        print(tif_path)

        folder, tif_file = os.path.split(tif_path)
        ndpi_id, x, y, w, h = tif_file[:-4].split('_')  # Find patch location
        x, y, w, h = int(x), int(y), int(w), int(h)

        # Load NDPI slide
        ndpi_path = os.path.join(DIR_cLEAN2, ndpi_id + '.ndpi')
        ndpi_slide = NDPI_Slide(ndpi_path)

        # Load image and run detection
        image = ndpi_slide.read_region((x, y), 0, (patch_size, patch_size))
        height, width, depth = image.shape
        image = cv2.resize(image, (int(width / 2), int(height / 2)))
        detection = model.detect([image])
        if len(detection) == 0:
            continue
        results = detection[0]

        # save segmentation for visualization
        # vis_path = tif_path.replace('.tif', '.jpg')
        # visualize.display_instances(
        #     image, results['rois'], results['masks'], results['class_ids'],
        #     'nucleus', results['scores'],
# We only process slides that have blue ink ROIs
roi_paths = glob.glob(DIR_roi_level0 + "/*.txt")
# roi_paths = glob.glob(DIR_roi_level0+"/12XS12147.txt")

for roi_path in roi_paths:
    _, roi_file = os.path.split(roi_path)
    ndpi_id = roi_file[:-4]

    # read NDPA annotation
    ndpa_annotation_path = os.path.join(DIR_annotated, ndpi_id + '.ndpi.ndpa')
    if not os.path.exists(ndpa_annotation_path):
        print('Cannot find ' + ndpa_annotation_path)
        continue  # skip slides without an NDPA annotation

    ink_ndpi_slide_path = os.path.join(DIR_ink, ndpi_id + '.ndpi')
    ink_slide = NDPI_Slide(ink_ndpi_slide_path)
    polygons_ink, rects_ink, title_ink = ink_slide.read_ndpa_annotation(
        ndpa_annotation_path)
    # ink_patch = ink_slide.read_region((rects_ink[0][0], rects_ink[0][1]), 0, (rects_ink[0][2], rects_ink[0][3]))
    # skimage.io.imsave('ink_patch.bmp', ink_patch)

    # read ink-to-cLEAN2 alignment parameters
    align_path = os.path.join(DIR_image_align_ink2clean, ndpi_id + '.txt')
    align_para = np.loadtxt(align_path, delimiter=",")

    # convert to cLEAN2 image coordinates
    rects_cLEAN2 = []  # stores the NDPA abnormal annotations in cLEAN2 coordinates
    x_offset = align_para[0, 2] * 4  # alignment parameters are at Level 2 resolution
    y_offset = align_para[1, 2] * 4  # we work at Level 0, so there is a 2^2 = 4 scale difference
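    # A minimal sketch of the step this snippet stops short of (filling rects_cLEAN2),
    # assuming each NDPA rectangle is (x, y, w, h) at Level 0 and that the alignment
    # is a pure translation into the cLEAN2 frame (a 100-pixel shift at Level 2
    # becomes a 400-pixel shift at Level 0):
    for x_ink, y_ink, w_ink, h_ink in rects_ink:
        # translate the ink-slide rectangle into cLEAN2 Level 0 coordinates
        rects_cLEAN2.append((int(x_ink + x_offset), int(y_ink + y_offset), w_ink, h_ink))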
Example #3
def crop_patch():
    ndpi_path = os.path.join(DIR_cLEAN2, '12XS00147.ndpi')
    ndpi_slide = NDPI_Slide(ndpi_path)

    roi_image = ndpi_slide.read_region((30720, 28672), 0, (2048, 2048))
    skimage.io.imsave('patch.jpg', roi_image)
Example #4
    DIR_roi_level0, \
    DIR_patch_level0

#
# Crop the Level 0 ROIs and save them as TIF images
# In total, 6 slides have no blue ink ROI; we skip them
#
from cercyt.shared.NDPI_Slide import NDPI_Slide

for coord_path in glob.glob(DIR_roi_level0 + '/*.txt'):

    # Read coord info
    coord_file = open(coord_path)
    lines = coord_file.readlines()
    coord_file.close()

    # Read NDPI slide
    folder, file = os.path.split(coord_path)
    ndpi_id = file[:-4]
    ndpi_path = os.path.join(DIR_cLEAN2, ndpi_id + '.ndpi')
    ndpi_slide = NDPI_Slide(ndpi_path)

    # Crop the ROIs
    for line in lines:
        words = line.strip().split(',')
        x, y, w, h = int(words[1]), int(words[2]), int(words[3]), int(words[4])
        roi_image = ndpi_slide.read_region((x, y), 0, (w, h))
        roi_file = '{0}_{1}_{2}_{3}_{4}.tif'.format(ndpi_id, x, y, w, h)
        roi_path = os.path.join(DIR_patch_level0, roi_file)
        skimage.io.imsave(roi_path, roi_image)
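The parsing above assumes each line of the coordinate file is comma-separated, with the first field a label and the next four the ROI's x, y, width, and height at Level 0; a hypothetical line would look like:

    roi_0,30720,28672,2048,2048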
Example #5
model = modellib.MaskRCNN(mode="inference", config=config, model_dir='')
model.load_weights(model_path, by_name=True)

# Loop over the patch images
tif_paths = glob.glob(patch_folder + '/*.tif')
for tif_path in tif_paths:
    """Run segmentation on an image."""
    print(tif_path)

    folder, tif_file = os.path.split(tif_path)
    ndpi_id, x, y, w, h = tif_file[:-4].split('_')  # Find patch location
    x, y, w, h = int(x), int(y), int(w), int(h)

    # Load NDPI slide
    ndpi_path = os.path.join(DIR_cLEAN2, ndpi_id + '.ndpi')
    ndpi_slide = NDPI_Slide(ndpi_path)

    # Load NDPA annotation
    ndpa_annotation_path = os.path.join(DIR_annotated, ndpi_id + '.ndpi.ndpa')
    polygons, rects, titles = ndpi_slide.read_ndpa_annotation(
        ndpa_annotation_path)
    # Pick the one annotation corresponding to this patch
    for i, rect in enumerate(rects):
        if len(intersection(rect, (x, y, w, h))) != 0:  # the annotation overlaps this patch
            polygon = polygons[i]
            break

    # Compute the top-left corner of the patch fed to Mask R-CNN; we work at Level 1 resolution
    x_0, y_0 = float(x) + float(w) / 2.0, float(y) + float(h) / 2.0
    location = int(x_0 - patch_size / 2 + 0.5), int(y_0 - patch_size / 2 + 0.5)
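intersection() is not defined in these snippets; the check above only relies on it returning an empty sequence when two (x, y, w, h) rectangles do not overlap. A minimal sketch under that assumption:

def intersection(rect_a, rect_b):
    # Return the overlap of two (x, y, w, h) rectangles, or () if they do not overlap
    x = max(rect_a[0], rect_b[0])
    y = max(rect_a[1], rect_b[1])
    w = min(rect_a[0] + rect_a[2], rect_b[0] + rect_b[2]) - x
    h = min(rect_a[1] + rect_a[3], rect_b[1] + rect_b[3]) - y
    if w <= 0 or h <= 0:
        return ()
    return (x, y, w, h)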
    DataInfo, \
    FILE_DATA_INFO, DIR_annotated, DIR_ink, DIR_image_align_ink2clean, DIR_cLEAN2, DIR_G_Tom_Patch_normal, \
    DIR_G_Tom_Patch_malignancy, DIR_G_Tom_Patch_abnormal
from cercyt.shared.NDPI_Slide import NDPI_Slide

data_info = DataInfo(FILE_DATA_INFO)
slide_ids = data_info.get_abnormal_slide_ids()

for ndpi_id in slide_ids:
    # read NDPA annotation
    ndpa_annotation_path = os.path.join(DIR_annotated, ndpi_id + '.ndpi.ndpa')
    if not os.path.exists(ndpa_annotation_path):
        print('Cannot find ' + ndpa_annotation_path)
        continue  # skip slides without an NDPA annotation

    ink_ndpi_slide_path = os.path.join(DIR_ink, ndpi_id + '.ndpi')
    ink_slide = NDPI_Slide(ink_ndpi_slide_path)
    polygons_ink, rects_ink, title_ink = ink_slide.read_ndpa_annotation(
        ndpa_annotation_path)
    # ink_patch = ink_slide.read_region((rects_ink[0][0], rects_ink[0][1]), 0, (rects_ink[0][2], rects_ink[0][3]))
    # skimage.io.imsave('ink_patch.bmp', ink_patch)

    # read ink-to-cLEAN2 alignment parameters
    align_path = os.path.join(DIR_image_align_ink2clean, ndpi_id + '.txt')
    align_para = np.loadtxt(align_path, delimiter=",")

    # convert to cLEAN2 image coordinates
    rects_cLEAN2 = []  # stores the NDPA abnormal annotations in cLEAN2 coordinates
    x_offset = align_para[0, 2] * 4  # alignment parameters are at Level 2 resolution
    y_offset = align_para[1, 2] * 4  # we work at Level 0, so there is a 2^2 = 4 scale difference
# Collect some patches from normal slides
slides = data_info.get_normal_cell_patch_classification_train_slide_ids() + \
         data_info.get_normal_cell_patch_classification_test_slide_ids()
dir_to_save = os.path.join(DIR_nuclei_segmentation, 'normal')

for slide_id in slides:
    patch_folder = os.path.join(DIR_tile_patches, slide_id)
    tif_paths = glob.glob(patch_folder + '/*.tif')

    tif_paths = random.sample(tif_paths, 20)

    # Loop over the patch images
    for tif_path in tif_paths:
        print(tif_path)

        folder, tif_file = os.path.split(tif_path)
        ndpi_id, x, y, w, h = tif_file[:-4].split('_')  # Find patch location
        x, y, w, h = int(x), int(y), int(w), int(h)

        # Load NDPI slide
        ndpi_path = os.path.join(DIR_cLEAN2, ndpi_id + '.ndpi')
        ndpi_slide = NDPI_Slide(ndpi_path)

        # Load the image and save it as a BMP patch
        image = ndpi_slide.read_region((x, y), 0, (patch_size, patch_size))
        cell_path = os.path.join(
            dir_to_save,
            '{0}_{1}_{2}_{3}_{4}.bmp'.format(ndpi_id, x, y, patch_size,
                                             patch_size))
        skimage.io.imsave(cell_path, image)
Example #8
config = NucleusInferenceConfig()
config.display()
model = modellib.MaskRCNN(mode="inference", config=config, model_dir='')
model.load_weights(model_path, by_name=True)

image_folder = os.path.join(DIR_nuclei_segmentation, 'normal')
image_paths = glob.glob(image_folder + '/*.bmp')

for image_path in image_paths:
    folder, tif_file = os.path.split(image_path)
    ndpi_id, x, y, w, h = tif_file[:-4].split('_')  # Find patch location
    x, y, w, h = int(x), int(y), int(w), int(h)

    # Load NDPI slide
    ndpi_path = os.path.join(DIR_cLEAN2, ndpi_id + '.ndpi')
    ndpi_slide = NDPI_Slide(ndpi_path)

    # Load image and run detection
    image = ndpi_slide.read_region((x, y), 0, (w, h))  # size is (width, height), as in the other snippets
    height, width, depth = image.shape
    image = cv2.resize(image, (int(width / 2), int(height / 2)))
    detection = model.detect([image])
    if len(detection) == 0:
        continue

    results = detection[0]
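    # Note: because the patch was downscaled by 2 before detection, the returned
    # 'rois' and 'masks' are in the half-resolution frame; mapping them back to
    # slide Level 0 coordinates would need a factor of 2 plus the (x, y) patch offset.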

    # save segmentation for visualization
    vis_path = image_path.replace('.bmp', '.jpg')
    visualize.display_instances(image,
                                results['rois'],