def test_vis_instance_segmentation_invalid_inputs(self):
    with self.assertRaises(ValueError):
        vis_instance_segmentation(self.img,
                                  self.mask,
                                  self.label,
                                  self.score,
                                  label_names=self.label_names)
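The fixtures used above (self.img, self.mask, self.label, self.score, self.label_names) are not part of this excerpt. Below is a minimal sketch of what such a setUp could look like, assuming ChainerCV's usual input conventions (a (3, H, W) float32 image, an (R, H, W) bool mask array, and (R,) label/score arrays) and using a deliberate length mismatch to provoke the expected ValueError; the names, shapes, and values are illustrative only.

import numpy as np

def setUp(self):
    # Illustrative fixtures; the original test's setUp is not shown here.
    H, W, R = 32, 48, 3
    self.img = np.random.uniform(0, 255, size=(3, H, W)).astype(np.float32)
    self.mask = np.random.randint(0, 2, size=(R, H, W)).astype(bool)
    self.label = np.random.randint(0, 2, size=(R,)).astype(np.int32)
    # One more score than there are masks: such a length mismatch is one
    # way to make vis_instance_segmentation raise ValueError.
    self.score = np.random.uniform(0, 1, size=(R + 1,)).astype(np.float32)
    self.label_names = ('class_0', 'class_1')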
Example No. 2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model', default='sbd')
    parser.add_argument('image')
    args = parser.parse_args()

    model = FCISPSROIAlignResNet101(n_fg_class=20,
                                    pretrained_model=args.pretrained_model)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = read_image(args.image, color=True)

    masks, labels, scores = model.predict([img])
    mask, label, score = masks[0], labels[0], scores[0]
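    # Derive a bounding box from each predicted mask and assign one color per
    # instance so the box and mask overlays below share colors.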
    bbox = mask_to_bbox(mask)
    colors = voc_colormap(list(range(1, len(mask) + 1)))
    ax = vis_bbox(img, bbox, instance_colors=colors, alpha=0.5, linewidth=1.5)
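    # Overlay the instance masks on the same axes; the image argument is None
    # because vis_bbox already drew the image onto ax.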
    vis_instance_segmentation(
        None,
        mask,
        label,
        score,
        label_names=sbd_instance_segmentation_label_names,
        instance_colors=colors,
        alpha=0.7,
        ax=ax)
    plt.show()
Example No. 3
    def test_vis_instance_segmentation_invalid_inputs(self):
        if not optional_modules:
            return

        with self.assertRaises(ValueError):
            vis_instance_segmentation(
                self.img, self.bbox, self.mask, self.label, self.score,
                label_names=self.label_names)
Example No. 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model',
        choices=('faster_rcnn_fpn_resnet50', 'faster_rcnn_fpn_resnet101',
                 'mask_rcnn_fpn_resnet50', 'mask_rcnn_fpn_resnet101'),
        default='faster_rcnn_fpn_resnet50')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('image')
    args = parser.parse_args()

    if args.model == 'faster_rcnn_fpn_resnet50':
        mode = 'bbox'
        model = FasterRCNNFPNResNet50(n_fg_class=len(coco_bbox_label_names),
                                      pretrained_model=args.pretrained_model)
    elif args.model == 'faster_rcnn_fpn_resnet101':
        mode = 'bbox'
        model = FasterRCNNFPNResNet101(n_fg_class=len(coco_bbox_label_names),
                                       pretrained_model=args.pretrained_model)
    elif args.model == 'mask_rcnn_fpn_resnet50':
        mode = 'instance_segmentation'
        model = MaskRCNNFPNResNet50(
            n_fg_class=len(coco_instance_segmentation_label_names),
            pretrained_model=args.pretrained_model)
    elif args.model == 'mask_rcnn_fpn_resnet101':
        mode = 'instance_segmentation'
        model = MaskRCNNFPNResNet101(
            n_fg_class=len(coco_instance_segmentation_label_names),
            pretrained_model=args.pretrained_model)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = utils.read_image(args.image)

    if mode == 'bbox':
        bboxes, labels, scores = model.predict([img])
        bbox = bboxes[0]
        label = labels[0]
        score = scores[0]

        vis_bbox(img, bbox, label, score, label_names=coco_bbox_label_names)
    elif mode == 'instance_segmentation':
        masks, labels, scores = model.predict([img])
        mask = masks[0]
        label = labels[0]
        score = scores[0]
        vis_instance_segmentation(
            img,
            mask,
            label,
            score,
            label_names=coco_instance_segmentation_label_names)
    plt.show()
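For reference, this excerpt would need roughly the following import header. This is a sketch only, assuming a ChainerCV release in which the FPN detectors are exported from chainercv.links; the exact module layout can differ between versions.

# Assumed import layout; verify against the installed ChainerCV version.
import argparse

import chainer
import matplotlib.pyplot as plt

from chainercv.datasets import coco_bbox_label_names
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.links import FasterRCNNFPNResNet101
from chainercv.links import FasterRCNNFPNResNet50
from chainercv.links import MaskRCNNFPNResNet101
from chainercv.links import MaskRCNNFPNResNet50
from chainercv import utils
from chainercv.visualizations import vis_bbox
from chainercv.visualizations import vis_instance_segmentation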
Example No. 5
def save_instance_image(image,
                        mask,
                        label=None,
                        label_names=None,
                        filename=None):
    vis_instance_segmentation(image, mask, label, label_names=label_names)
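    # Hide the tick labels (first call) and the tick marks (second call) so
    # only the rendered instances end up in the saved file.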
    plt.tick_params(labelbottom=False,
                    labelleft=False,
                    labelright=False,
                    labeltop=False)

    plt.tick_params(bottom=False, left=False, right=False, top=False)
    plt.savefig(filename, bbox_inches='tight', pad_inches=0)
    plt.close()
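A short usage sketch for the helper above, using synthetic inputs; the array contents, label names, and output file name are made up, while the shapes follow ChainerCV's conventions (CHW image, per-instance bool masks).

import numpy as np

# Synthetic example: one 3x64x64 image with two rectangular instance masks.
image = np.random.uniform(0, 255, size=(3, 64, 64)).astype(np.float32)
mask = np.zeros((2, 64, 64), dtype=bool)
mask[0, 10:30, 10:30] = True
mask[1, 35:55, 35:55] = True
label = np.array([0, 1], dtype=np.int32)

save_instance_image(image, mask, label,
                    label_names=('cat', 'dog'),
                    filename='instances.png')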
Example No. 6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model', default=None)
    parser.add_argument('--dataset', choices=('sbd', 'coco'), default='sbd')
    parser.add_argument('image')
    args = parser.parse_args()

    if args.dataset == 'sbd':
        if args.pretrained_model is None:
            args.pretrained_model = 'sbd'
        label_names = sbd_instance_segmentation_label_names
        model = FCISResNet101(n_fg_class=len(label_names),
                              pretrained_model=args.pretrained_model)
    elif args.dataset == 'coco':
        if args.pretrained_model is None:
            args.pretrained_model = 'coco'
        label_names = coco_instance_segmentation_label_names
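        # COCO contains many small objects, so add a smaller anchor scale and
        # lower the proposal min_size compared with the SBD defaults.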
        proposal_creator_params = FCISResNet101.proposal_creator_params
        proposal_creator_params['min_size'] = 2
        model = FCISResNet101(n_fg_class=len(label_names),
                              anchor_scales=(4, 8, 16, 32),
                              pretrained_model=args.pretrained_model,
                              proposal_creator_params=proposal_creator_params)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = read_image(args.image, color=True)

    masks, labels, scores = model.predict([img])
    mask, label, score = masks[0], labels[0], scores[0]
    bbox = mask_to_bbox(mask)
    colors = voc_colormap(list(range(1, len(mask) + 1)))
    ax = vis_bbox(img, bbox, instance_colors=colors, alpha=0.5, linewidth=1.5)
    vis_instance_segmentation(None,
                              mask,
                              label,
                              score,
                              label_names=label_names,
                              instance_colors=colors,
                              alpha=0.7,
                              ax=ax)
    plt.show()
Example No. 7
    def test_vis_instance_segmentation(self):
        ax = vis_instance_segmentation(self.img,
                                       self.mask,
                                       self.label,
                                       self.score,
                                       label_names=self.label_names,
                                       instance_colors=self.instance_colors)

        self.assertIsInstance(ax, matplotlib.axes.Axes)
Example No. 8
    def test_vis_instance_segmentation(self):
        if not optional_modules:
            return

        ax = vis_instance_segmentation(
            self.img, self.bbox, self.mask, self.label, self.score,
            label_names=self.label_names)

        self.assertIsInstance(ax, matplotlib.axes.Axes)
Example No. 9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('image')
    args = parser.parse_args()

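    # Parameters forwarded to the region proposal creator (RPN) inside FCIS;
    # they control NMS and how many proposals are kept at train/test time.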
    proposal_creator_params = {
        'nms_thresh': 0.7,
        'n_train_pre_nms': 12000,
        'n_train_post_nms': 2000,
        'n_test_pre_nms': 6000,
        'n_test_post_nms': 1000,
        'force_cpu_nms': False,
        'min_size': 0
    }

    model = FCISPSROIAlignResNet101(
        n_fg_class=len(coco_instance_segmentation_label_names),
        min_size=800, max_size=1333,
        anchor_scales=(2, 4, 8, 16, 32),
        pretrained_model=args.pretrained_model,
        proposal_creator_params=proposal_creator_params)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = read_image(args.image, color=True)

    masks, labels, scores = model.predict([img])
    mask, label, score = masks[0], labels[0], scores[0]
    bbox = mask_to_bbox(mask)
    colors = voc_colormap(list(range(1, len(mask) + 1)))
    ax = vis_bbox(
        img, bbox, instance_colors=colors, alpha=0.5, linewidth=1.5)
    vis_instance_segmentation(
        None, mask, label, score,
        label_names=coco_instance_segmentation_label_names,
        instance_colors=colors, alpha=0.7, ax=ax)
    plt.show()