def load_mask(self, image_id):
    """Return the instance masks and class ids for *image_id*.

    Looks up the 2-D labelled mask stored in ``self.data_dict`` under the
    image's path, expands it into a 3-D (H, W, n_instances) stack, and
    labels every instance with class id 1 (single-class dataset).
    """
    entry = self.data_dict[self.image_info[image_id]['path']]
    mask = utils.mask_2Dto3D(entry['mask'])
    # Single-class dataset: every instance gets id 1.
    class_ids = np.ones(mask.shape[2], dtype=np.int32)
    return mask, class_ids
    def load_mask(self, image_id):
        """Return the instance masks and class ids for *image_id*.

        The mask stored in ``self.data_dict`` may be either a 2-D labelled
        image (expanded to a 3-D boolean stack via ``utils.mask_2Dto3D``)
        or an already 3-D (H, W, n_instances) array. Every instance gets
        class id 1 (single-class dataset).

        Raises:
            ValueError: if the stored mask is neither 2-D nor 3-D.
        """
        mask = self.data_dict[self.image_info[image_id]['path']]['mask']
        if mask.ndim == 2:
            mask3D = utils.mask_2Dto3D(mask).astype('bool')
        elif mask.ndim == 3:
            mask3D = mask.astype('bool')
        else:
            # Previously any other rank fell through both branches and
            # crashed with a confusing NameError (mask3D unbound);
            # fail loudly with a clear message instead.
            raise ValueError(
                'expected a 2D or 3D mask, got shape %s' % (mask.shape,))

        class_ids = np.ones(mask3D.shape[2], dtype=np.int32)

        return mask3D, class_ids
# --- Esempio n. 3 (0) ---
    def load_mask(self, image_id):
        """Return the instance masks and class ids for *image_id*.

        The stored 2-D labelled mask is expanded into a 3-D
        (H, W, n_instances) stack.  When the module-level flag
        ``class_bright_dark`` is set, instances are labelled according to
        the image's field type (1 = 'dark', 2 = 'bright'); otherwise every
        instance gets class id 1.
        """
        entry = self.data_dict[self.image_info[image_id]['path']]
        mask = utils.mask_2Dto3D(entry['mask'])
        n_instances = mask.shape[2]

        # Default label is 1; bump bright-field images to 2 when the
        # bright/dark classification is enabled.
        label = 1
        if class_bright_dark and entry['class'] == 'bright':
            label = 2
        class_ids = np.ones(n_instances, dtype=np.int32) * label

        return mask, class_ids
def process_data(data_train,
                 yn_force_bright_field=False,
                 yn_rescale_image=False,
                 yn_data_mask3D=False):
    """Return a preprocessed deep copy of *data_train*.

    Parameters
    ----------
    data_train : dict
        Maps image id -> entry dict with at least 'image' and 'mask'.
    yn_force_bright_field : bool
        Invert (255 - image) every image classified as 'dark' so the whole
        set looks bright-field.
    yn_rescale_image : bool
        Rescale every image/mask once per target nuclei density in the
        module-level ``list_num_nuc_on_edge_ideal``; the result is re-keyed
        as ``(original_id, num_nuc_on_edge_ideal)``.
    yn_data_mask3D : bool
        Convert every 2-D mask to the 3-D (H, W, n_instances) format.

    Returns
    -------
    dict
        New data dict; the input is never mutated (deep copy up front).
    """
    result = copy.deepcopy(data_train)
    result = add_class_to_data_dict(result)

    # Flip dark-field images so every image reads as bright-field.
    if yn_force_bright_field:
        for entry in result.values():
            if entry['class'] == 'dark':
                entry['image'] = 255 - entry['image']

    if yn_rescale_image:
        print('rescaling images based on nuclei size')
        rescaled = {}
        for id_img, entry in tqdm(result.items()):
            for num_ideal in list_num_nuc_on_edge_ideal:
                image_rescale, mask_rescale, rescale_factor = (
                    utils.resize_image_mask(
                        entry['image'],
                        entry['mask'],
                        num_nuc_on_edge_ideal=num_ideal,
                        size_patch=size_patch,
                        max_rescale_factor=4))
                # One output entry per (image, target density) pair.
                rescaled[(id_img, num_ideal)] = {
                    'image': image_rescale,
                    'mask': mask_rescale,
                    'class': entry['class'],
                    'num_nuc_on_edge_ideal': num_ideal,
                    'rescale_factor': rescale_factor,
                }
        result = rescaled

    if yn_data_mask3D:
        print('make mask in 3D format')
        for entry in tqdm(result.values()):
            entry['mask'] = utils.mask_2Dto3D(entry['mask'])

    return result
# --- Esempio n. 5 (0) ---
                  mask_size=mask_size,
                  score=score_stitched,
                  yn_keep=yn_keep_mask)

# Side-by-side view: the original image on the left, and on the right the
# same image with the predicted mask contours drawn over it.
image_detection_in_pixel = np.concatenate((image, image), axis=1)
image_detection_in_pixel[:, image.shape[1]:, :] = utils.gen_mask_contour(
    mask_pred=mask_post, mask_true=None, image=image)
plt.figure()
plt.imshow(image_detection_in_pixel)
plt.show()

# mask_stitched, score_stitched, size_seg, mask_size = gen_mask_by_mask_number_iter(image=image, size_seg_ini=512, ideal_num_mask=20, flag_plot=True, flag_use_dn=True)
# plt.show()
# Disabled debug block: 2x2 figure comparing the stitched prediction with
# the ground-truth mask (flip the guard to True to render it).
if False:
    mask_pred = mask_stitched
    mask_true = utils.mask_2Dto3D(data_train[image_id]['mask'])
    plt.figure(figsize=(8, 8))
    plt.subplot(2, 2, 1)
    plt.imshow(image)
    plt.axis('off')
    plt.subplot(2, 2, 2)
    # Overlay: red = prediction, green = ground truth (per the title below).
    plt.imshow(utils.gen_mask_contour(mask_pred, mask_true, image))
    plt.title('r: pred, g: groud truth')
    plt.axis('off')
    plt.subplot(2, 2, 3)
    utils.plot_mask2D(utils.mask_3Dto2D(mask_pred))
    plt.title('predict')
    plt.axis('off')
    plt.subplot(2, 2, 4)
    utils.plot_mask2D(utils.mask_3Dto2D(mask_true))
    plt.axis('off')
            data_train_aug[id_img + '_o3']['mask'] = np.rot90(mask_cur, 3)
        data_train_selection = data_train_aug
    else:
        data_train_selection = data_train

    data_train_seg = {}
    # split every image so that the diameter of every nuclei takes 1/16 ~ 1/8 of the image length
    # list_amplification = [8, 16]
    list_amplification = [16, 32]
    for id_img in tqdm(data_train_selection.keys()):
        img_cur = data_train_selection[id_img]['image']
        mask_cur = data_train_selection[id_img]['mask']
        size_nuclei = int(
            np.mean(
                np.sqrt(
                    np.sum(utils.mask_2Dto3D(
                        data_train_selection[id_img]['mask']),
                           axis=(0, 1)))))
        for amplification in list_amplification:
            img_cur_seg = utils.img_split(img=img_cur,
                                          size_seg=size_nuclei * amplification)
            mask_cur_seg = utils.img_split(img=mask_cur,
                                           size_seg=size_nuclei *
                                           amplification)
            for start_loc in img_cur_seg.keys():
                data_train_seg[(id_img, start_loc, amplification)] = {}
                data_train_seg[(
                    id_img, start_loc,
                    amplification)]['image'] = img_cur_seg[start_loc]
                data_train_seg[(
                    id_img, start_loc,
                    amplification)]['mask'] = mask_cur_seg[start_loc]
# --- Esempio n. 7 (0) ---
# split image segments
# ----- 4.1 image split: cut the image into 128x128 patches (20 % overlap)
# and show each patch in a grid indexed by its (row, col) start offset.
img_seg = utils.img_split(image, size_seg=128, overlap=0.2)
img_seg_start = np.array(list(img_seg.keys()))
rs_start = np.unique(img_seg_start[:, 0])  # distinct row start offsets
cs_start = np.unique(img_seg_start[:, 1])  # distinct column start offsets
h_fig, h_axes = plt.subplots(len(rs_start), len(cs_start))
for i_r, r in enumerate(rs_start):
    for i_c, c in enumerate(cs_start):
        plt.axes(h_axes[i_r, i_c])
        plt.imshow(img_seg[(r, c)])
        plt.axis('off')
        plt.title((r, c), fontsize='x-small')

# Split the ground-truth mask the same way, then expand each 2-D patch to
# the 3-D per-instance format expected by the mask stitcher.
# NOTE(review): the mask is split with overlap=0.5 while the image above
# uses overlap=0.2 — confirm the mismatch is intentional.
mask_seg = utils.img_split(mask_true, size_seg=128, overlap=0.5)
mask_seg = {key: utils.mask_2Dto3D(mask_seg[key]) for key in mask_seg}


# ----- 4.2 image stitch
importlib.reload(utils)
img_full = utils.img_stitch(img_seg)
plt.figure()
plt.imshow(img_full)

# ----- 4.3 mask stitch
importlib.reload(utils)

mask_full = utils.img_stitch(mask_seg, mode='mask')
plt.imshow(utils.mask_3Dto2D(mask_full[0]), cmap='Paired')