Code example #1
def show(uid, x, y, y_c, y_m, save=False):
    threshold = config['param'].getfloat('threshold')
    segmentation = config['post'].getboolean('segmentation')
    remove_objects = config['post'].getboolean('remove_objects')
    min_object_size = config['post'].getint('min_object_size')
    view_color_equalize = config['valid'].getboolean('view_color_equalize')
    model_name = config['param']['model']

    if model_name == 'camunet':
        threshold_edge = config[model_name].getfloat('threshold_edge')
        threshold_mark = config[model_name].getfloat('threshold_mark')
    elif model_name in ('dcan', 'caunet'):
        threshold_edge = config[model_name].getfloat('threshold_edge')

    fig, (ax1, ax2) = plt.subplots(2, 3, sharey=True, figsize=(10, 8))
    fig.suptitle(uid, y=1)
    ax1[1].set_title('Final Pred, P > {}'.format(threshold))
    ax1[2].set_title('Overlay, P > {}'.format(threshold))
    y_bw = y > threshold

    if view_color_equalize:
        x = clahe(x)
    ax1[0].set_title('Image')
    ax1[0].imshow(x, aspect='auto')
    if segmentation:
        y, markers = partition_instances(y, y_m, y_c)
    else:
        # no instance partitioning: fall back to the thresholded mask,
        # so `markers` is always defined for the panels below
        y = y_bw
        markers = label(y_bw)
    if remove_objects:
        y = remove_small_objects(y, min_size=min_object_size)
    y, cmap = _make_overlay(y)
    ax1[1].imshow(y, cmap=cmap, aspect='auto')
    # overlay the prediction on the image with transparency
    ax1[2].imshow(x, aspect='auto')
    ax1[2].imshow(y, cmap=cmap, alpha=0.3, aspect='auto')

    ax2[0].set_title('Semantic Pred, P > {}'.format(threshold))
    ax2[0].imshow(y_bw, cmap='gray', aspect='auto')
    _, count = label(markers, return_num=True)
    ax2[1].set_title('Markers, #={}'.format(count))
    ax2[1].imshow(markers, cmap='gray', aspect='auto')
    if y_c is not None:
        ax2[2].set_title('Contour Pred, P > {}'.format(threshold_edge))
        y_c = y_c > threshold_edge
        ax2[2].imshow(y_c, cmap='gray', aspect='auto')
    plt.tight_layout()

    if save:
        save_dir = predict_save_folder()  # avoid shadowing the builtin dir()
        fp = os.path.join(save_dir, uid + '.png')
        plt.savefig(fp)
    else:
        show_figure()
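
For reference, `_make_overlay` above pairs a mask with a colormap for `imshow`. The repository's actual helper is not part of this excerpt; the following is a minimal sketch of one plausible implementation, assuming the input is a binary or labeled mask and that the background should stay transparent in the overlay.

import numpy as np
from matplotlib.colors import ListedColormap
from skimage.measure import label

def _make_overlay(mask):
    # Sketch (assumption, not the repository's actual helper): label connected
    # components, mask out the background so imshow renders it transparent,
    # and pair the labels with a random colormap so neighboring instances
    # receive distinct colors.
    lbl, n = label(mask, return_num=True)
    overlay = np.ma.masked_where(lbl == 0, lbl)
    colors = np.random.rand(max(n, 1), 3)
    return overlay, ListedColormap(colors)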
Code example #2
def show_groundtruth(uid, x, y, y_c, y_m, gt, gt_s, gt_c, gt_m, save=False):
    threshold = config['param'].getfloat('threshold')
    segmentation = config['post'].getboolean('segmentation')
    remove_objects = config['post'].getboolean('remove_objects')
    min_object_size = config['post'].getint('min_object_size')
    only_contour = config['contour'].getboolean('exclusive')
    view_color_equalize = config['valid'].getboolean('view_color_equalize')
    print_table = config['valid'].getboolean('print_table')
    model_name = config['param']['model']

    if model_name == 'camunet':
        threshold_edge = config[model_name].getfloat('threshold_edge')
        threshold_mark = config[model_name].getfloat('threshold_mark')
    elif model_name in ('dcan', 'caunet'):
        threshold_edge = config[model_name].getfloat('threshold_edge')

    fig, (ax1, ax2, ax3) = plt.subplots(3, 4, sharey=True, figsize=(12, 8))
    fig.suptitle(uid, y=1)

    y_s = y  # keep the raw prediction so the pure semantic output can be shown later

    if view_color_equalize:
        x = clahe(x)
    ax1[0].set_title('Image')
    ax1[0].imshow(x, aspect='auto')
    if segmentation:
        y, markers = partition_instances(y, y_m, y_c)
    else:
        # no instance partitioning: fall back to the thresholded mask,
        # so `markers` is always defined for the panels below
        y = y > threshold
        markers = label(y)
    if remove_objects:
        y = remove_small_objects(y, min_size=min_object_size)
    _, count = label(y, return_num=True)
    ax1[1].set_title('Final Pred, #={}'.format(count))
    ax1[1].imshow(y, cmap='gray', aspect='auto')
    # overlay contours on the semantic ground truth (an alternative view of the instance ground truth, i.e. gt)
    _, count = label(gt, return_num=True)
    ax1[2].set_title('Instance Lbls, #={}'.format(count))
    ax1[2].imshow(gt_s, cmap='gray', aspect='auto')
    gt_c2, cmap = _make_overlay(gt_c)
    ax1[2].imshow(gt_c2, cmap=cmap, alpha=0.7, aspect='auto')
    if only_contour:  # cannot distinguish instances in this case
        iou = iou_metric(y, label(gt > 0), print_table)
    else:
        iou = iou_metric(y, gt, print_table)
    ax1[3].set_title('Overlay, IoU={:.3f}'.format(iou))
    ax1[3].imshow(gt_s, cmap='gray', aspect='auto')
    y, cmap = _make_overlay(y)
    ax1[3].imshow(y, cmap=cmap, alpha=0.3, aspect='auto')

    y_s = y_s > threshold
    _, count = label(y_s, return_num=True)
    ax2[0].set_title('Semantic Predict, #={}'.format(count))
    ax2[0].imshow(y_s, cmap='gray', aspect='auto')
    _, count = label(gt_s, return_num=True)
    ax2[1].set_title('Semantic Lbls, #={}'.format(count))
    ax2[1].imshow(gt_s, cmap='gray', aspect='auto')

    if y_c is not None:
        y_c = y_c > threshold_edge
        _, count = label(y_c, return_num=True)
        ax2[2].set_title('Contour Predict, #={}'.format(count))
        ax2[2].imshow(y_c, cmap='gray', aspect='auto')
        _, count = label(gt_c, return_num=True)
        ax2[3].set_title('Contour Lbls, #={}'.format(count))
        ax2[3].imshow(gt_c, cmap='gray', aspect='auto')

    _, count = label(markers, return_num=True)
    ax3[0].set_title('Final Markers, #={}'.format(count))
    ax3[0].imshow(markers, cmap='gray', aspect='auto')
    if y_m is not None:
        y_m = y_m > threshold_mark
        _, count = label(y_m, return_num=True)
        ax3[1].set_title('Marker Predict, #={}'.format(count))
        ax3[1].imshow(y_m, cmap='gray', aspect='auto')
        _, count = label(gt_m, return_num=True)
        ax3[2].set_title('Marker Lbls, #={}'.format(count))
        ax3[2].imshow(gt_m, cmap='gray', aspect='auto')

    plt.tight_layout()

    if save:
        save_dir = predict_save_folder()  # avoid shadowing the builtin dir()
        fp = os.path.join(save_dir, uid + '.png')
        plt.savefig(fp)
    else:
        show_figure()
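
`iou_metric` above scores predicted instances against ground-truth instances. As a reference for what such a metric computes, here is a sketch following the common average-precision-over-IoU-thresholds formulation (thresholds 0.5 to 0.95 in steps of 0.05); the codebase's actual implementation and its `print_table` output format are assumptions here.

import numpy as np
from skimage.measure import label as sk_label

def iou_metric(y_pred, y_true, print_table=False):
    # Sketch (assumption): relabel both maps, build the pairwise intersection
    # histogram, derive the IoU of every (truth, prediction) pair, then
    # average the precision over IoU thresholds 0.5, 0.55, ..., 0.95.
    pred = sk_label(np.asarray(y_pred) > 0)
    true = sk_label(np.asarray(y_true) > 0)
    n_pred, n_true = int(pred.max()), int(true.max())
    inter = np.histogram2d(true.ravel(), pred.ravel(),
                           bins=(n_true + 1, n_pred + 1))[0]
    area_true = inter.sum(axis=1)
    area_pred = inter.sum(axis=0)
    union = area_true[:, None] + area_pred[None, :] - inter
    iou = inter[1:, 1:] / np.maximum(union[1:, 1:], 1)  # drop background row/col

    precisions = []
    for t in np.arange(0.5, 1.0, 0.05):
        matches = iou > t                     # one-to-one matching for t >= 0.5
        tp = int(matches.any(axis=1).sum())   # matched ground-truth objects
        fp = n_pred - int(matches.any(axis=0).sum())
        fn = n_true - tp
        p = tp / max(tp + fp + fn, 1)
        if print_table:
            print('t={:.2f}  tp={}  fp={}  fn={}  prec={:.3f}'.format(
                t, tp, fp, fn, p))
        precisions.append(p)
    return float(np.mean(precisions))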
Code example #3
    def __call__(self, sample):
        image, label, label_c, label_m, label_gt = \
                sample['image'], sample['label'], sample['label_c'], sample['label_m'], sample['label_gt']
        if self.precise_contour:
            pil_masks = sample['pil_masks']
        weight = None

        if self.augment:
            if self.color_equalize and random.random() > 0.5:
                image = clahe(image)

            # perform RandomResize(), or simply enlarge when the image is smaller than the model input size
            if random.random() > 0.5:
                new_size = int(
                    random.uniform(self.min_scale, self.max_scale) *
                    np.min(image.size))
            else:
                new_size = int(np.min(image.size))
            if new_size < np.max(self.size):  # make it viable for cropping
                new_size = int(np.max(self.size))
            image, label, label_c, label_m = [
                tx.resize(x, new_size)
                for x in (image, label, label_c, label_m)
            ]
            if self.precise_contour:
                # regenerate all resized masks (bilinear interpolation) and compose them afterwards
                pil_masks = [tx.resize(m, new_size) for m in pil_masks]
                label_gt = compose_mask(pil_masks, pil=True)
            else:
                # label_gt uses NEAREST instead of the default BILINEAR to avoid polluting instance labels during augmentation
                label_gt = tx.resize(label_gt,
                                     new_size,
                                     interpolation=Image.NEAREST)

            # perform RandomCrop()
            i, j, h, w = transforms.RandomCrop.get_params(image, self.size)
            image, label, label_c, label_m, label_gt = [
                tx.crop(x, i, j, h, w)
                for x in (image, label, label_c, label_m, label_gt)
            ]
            if self.precise_contour:
                pil_masks = [tx.crop(m, i, j, h, w) for m in pil_masks]

            # Note: RandomResizedCrop() is popularly used to train the Inception networks, but might not be the best choice for segmentation
            # # perform RandomResizedCrop()
            # i, j, h, w = transforms.RandomResizedCrop.get_params(
            #     image,
            #     scale=(0.5, 1.0)
            #     ratio=(3. / 4., 4. / 3.)
            # )
            # # label_gt use NEAREST instead of BILINEAR (default) to avoid polluting instance labels after augmentation
            # image, label, label_c, label_m = [tx.resized_crop(x, i, j, h, w, self.size) for x in (image, label, label_c, label_m)]
            # label_gt = tx.resized_crop(label_gt, i, j, h, w, self.size, interpolation=Image.NEAREST)

            # perform Elastic Distortion
            if self.elastic_distortion and random.random() > 0.75:
                indices = ElasticDistortion.get_params(image)
                image, label, label_c, label_m = [
                    ElasticDistortion.transform(x, indices)
                    for x in (image, label, label_c, label_m)
                ]
                if self.precise_contour:
                    pil_masks = [
                        ElasticDistortion.transform(m, indices)
                        for m in pil_masks
                    ]
                    label_gt = compose_mask(pil_masks, pil=True)
                else:
                    label_gt = ElasticDistortion.transform(
                        label_gt, indices, spline_order=0
                    )  # spline_order=0 to avoid polluting instance labels

            # perform RandomHorizontalFlip()
            if random.random() > 0.5:
                image, label, label_c, label_m, label_gt = [
                    tx.hflip(x)
                    for x in (image, label, label_c, label_m, label_gt)
                ]

            # perform RandomVerticalFlip()
            if random.random() > 0.5:
                image, label, label_c, label_m, label_gt = [
                    tx.vflip(x)
                    for x in (image, label, label_c, label_m, label_gt)
                ]

            # perform Random Rotation (0, 90, 180, and 270 degrees)
            random_degree = random.randint(0, 3) * 90
            image, label, label_c, label_m, label_gt = [
                tx.rotate(x, random_degree)
                for x in (image, label, label_c, label_m, label_gt)
            ]

            # perform random color invert, assuming 3-channel (RGB) images
            if self.color_invert and random.random() > 0.5:
                image = ImageOps.invert(image)

            # perform ColorJitter()
            if self.color_jitter and random.random() > 0.5:
                color = transforms.ColorJitter.get_params(0.5, 0.5, 0.5, 0.25)
                image = color(image)

        elif self.resize:  # resize down image
            image, label, label_c, label_m = [
                tx.resize(x, self.size)
                for x in (image, label, label_c, label_m)
            ]
            if self.precise_contour:
                pil_masks = [tx.resize(m, self.size) for m in pil_masks]
                label_gt = compose_mask(pil_masks, pil=True)
            else:
                label_gt = tx.resize(label_gt,
                                     self.size,
                                     interpolation=Image.NEAREST)

        # replace with 'thinner' contours derived from the augmented/transformed mask
        if self.detect_contour:
            label_c, label_m, weight = get_instances_contour_interior(
                np.asarray(label_gt))
            label_c, label_m = Image.fromarray(label_c), Image.fromarray(
                label_m)

        # The resize algorithm may introduce anti-aliased (non-binary) edge
        # values, so map every pixel back to 0 or 255
        if self.label_binary:
            label, label_c, label_m = [
                x.point(lambda p, threshold=100: 255 if p > threshold else 0)
                for x in (label, label_c, label_m)
            ]
            # When training on contours only, use the merged instance-contour label (label_c);
            # the side effect is losing instance count information
            if self.only_contour:
                label_gt = label_c

        # perform ToTensor()
        if self.tensor:
            image, label, label_c, label_m, label_gt = \
                    [tx.to_tensor(x) for x in (image, label, label_c, label_m, label_gt)]
            # perform Normalize()
            image = tx.normalize(image, self.mean, self.std)

        # prepare a shallow copy of the composed data to avoid corrupting cached data
        x = sample.copy()
        x['image'], x['label'], x['label_c'], x['label_m'], x['label_gt'] = \
                image, label, label_c, label_m, label_gt

        if self.weight_map and weight is not None:
            weight = np.expand_dims(weight, 0)
            x['weight'] = torch.from_numpy(weight)

        if 'pil_masks' in x:
            del x['pil_masks']

        return x
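
`compose_mask` is called above whenever `precise_contour` is enabled, to rebuild a single instance-label image from the per-instance PIL masks after each geometric transform (interpolating a label image directly would blend neighboring instance ids). Below is a minimal sketch of what such a helper could look like; the >127 binarization threshold and the 32-bit 'I' output mode are assumptions.

import numpy as np
from PIL import Image

def compose_mask(masks, pil=False):
    # Sketch (assumption): paint instance i with label i + 1; since the masks
    # were resized with bilinear interpolation they are no longer binary, so
    # binarize each one first. Later masks overwrite earlier ones on overlap.
    canvas = np.zeros(np.asarray(masks[0]).shape[:2], dtype=np.int32)
    for i, m in enumerate(masks):
        canvas[np.asarray(m) > 127] = i + 1
    return Image.fromarray(canvas, mode='I') if pil else canvas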
Code example #4
    def __call__(self, sample):
        if 'label' not in sample:
            image = sample['image']
            if self.resize:  # resize down image
                image = tx.resize(image, self.size)
            # perform ToTensor()
            if self.tensor:
                image = tx.to_tensor(image)
                # perform Normalize()
                image = tx.normalize(image, self.mean, self.std)

            # prepare a shallow copy of the composed data to avoid corrupting cached data
            x = sample.copy()
            x['image'] = image
            return x

        image, label = sample['image'], sample['label']

        if self.augment:
            if self.color_equalize and random.random() > 0.5:
                image = clahe(image)

            # perform RandomResize(), or simply enlarge when the image is smaller than the model input size
            if random.random() > 0.5:
                new_size = int(random.uniform(self.min_scale, self.max_scale) * np.min(image.size))
            else:
                new_size = int(np.min(image.size))
            if new_size < np.max(self.size): # make it viable for cropping
                new_size = int(np.max(self.size))
            image, label = [tx.resize(x, new_size) for x in (image, label)]

            # perform RandomCrop()
            i, j, h, w = transforms.RandomCrop.get_params(image, self.size)
            image, label = [tx.crop(x, i, j, h, w) for x in (image, label)]

            # perform RandomHorizontalFlip()
            if random.random() > 0.5:
                image, label = [tx.hflip(x) for x in (image, label)]

            # perform RandomVerticalFlip()
            if random.random() > 0.5:
                image, label = [tx.vflip(x) for x in (image, label)]

            # perform Random Rotation (0, 90, 180, and 270 degrees)
            random_degree = random.randint(0, 3) * 90
            image, label = [tx.rotate(x, random_degree) for x in (image, label)]

            # perform channel shuffle
            if self.channel_shuffle:
                image = ChannelShuffle()(image)

            # perform random color invert, assuming 3-channel (RGB) images
            if self.color_invert and random.random() > 0.5:
                image = ImageOps.invert(image)

            # # perform ColorJitter()
            # if self.color_jitter and random.random() > 0.5:
            #     color = transforms.ColorJitter.get_params(0.5, 0.5, 0.5, 0.25)
            #     image = color(image)

            if self.add_noise and random.random() > 0.5:
                image = add_noise(image)
            
        elif self.resize:  # resize down image
            image, label = [tx.resize(x, self.size) for x in (image, label)]
        # perform ToTensor()
        if self.tensor:
            image = tx.to_tensor(image)
            # keep integer labels as-is: to_tensor() would rescale them to [0, 1]
            label = torch.tensor(np.array(label))
            # perform Normalize()
            image = tx.normalize(image, self.mean, self.std)

        # prepare a shallow copy of the composed data to avoid corrupting cached data
        x = sample.copy()
        x['image'], x['label'] = image, label

        return x
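
This variant also references `ChannelShuffle` and `add_noise`, which are not part of the excerpt. Two minimal sketches of what they plausibly do; the per-call channel permutation and the Gaussian sigma are assumptions.

import numpy as np
from PIL import Image

class ChannelShuffle:
    def __call__(self, image):
        # Sketch (assumption): randomly permute the channels of an RGB PIL image
        arr = np.asarray(image)
        return Image.fromarray(arr[..., np.random.permutation(arr.shape[-1])])

def add_noise(image, sigma=10.0):
    # Sketch (assumption): additive Gaussian noise, clipped back to uint8
    arr = np.asarray(image).astype(np.float32)
    arr += np.random.normal(0.0, sigma, arr.shape)
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))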