Example #1
    def run_on_image(self, image):
        """
        Args:
            image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.

        Returns:
            predictions (dict): the output of the model.
            vis_output (VisImage): the visualized image output.
        """
        vis_output = None
        predictions = self.predictor(image)
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        visualizer = Visualizer(image,
                                self.metadata,
                                instance_mode=self.instance_mode)
        if "panoptic_seg" in predictions:
            panoptic_seg, segments_info = predictions["panoptic_seg"]
            vis_output = visualizer.draw_panoptic_seg_predictions(
                panoptic_seg.to(self.cpu_device), segments_info)
        else:
            if "sem_seg" in predictions:
                vis_output = visualizer.draw_sem_seg(
                    predictions["sem_seg"].argmax(dim=0).to(self.cpu_device))
            if "instances" in predictions:
                instances = predictions["instances"].to(self.cpu_device)
                vis_output = visualizer.draw_instance_predictions(
                    predictions=instances)

            # Add the branch2 classification result, if present.
            if "classification" in predictions:
                classification = predictions["classification"].to(
                    self.cpu_device)
                vis_output = visualizer.draw_classification_predictions(
                    predictions=classification)

        return predictions, vis_output
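
For context, a minimal end-to-end sketch of how such a method is typically driven. The config choice, file names, and the use of a plain instance-segmentation model (no panoptic or branch2 classification head) are assumptions made for illustration, not part of the example above:

import cv2
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import ColorMode, Visualizer

# Build a COCO-pretrained Mask R-CNN predictor (illustrative choice).
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)

image = cv2.imread("input.jpg")          # placeholder path; BGR, shape (H, W, C)
predictions = predictor(image)

# Same conversion as above: OpenCV BGR -> Matplotlib RGB.
visualizer = Visualizer(image[:, :, ::-1],
                        MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                        instance_mode=ColorMode.IMAGE)
instances = predictions["instances"].to(torch.device("cpu"))
vis_output = visualizer.draw_instance_predictions(predictions=instances)
cv2.imwrite("output.jpg", vis_output.get_image()[:, :, ::-1])
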
Example #2
    def visualize(self, img, instances):
        # Visualize the image with object masks
        v = Visualizer(img[:, :, ::-1],
                       MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]),
                       scale=1.2)
        v = v.draw_instance_predictions(instances)
        return v.get_image()
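
The class labels drawn by draw_instance_predictions come from the metadata passed to the Visualizer, here MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]). A small sketch of how that lookup resolves to class names for a custom dataset; the dataset name and classes below are made up:

from detectron2.data import DatasetCatalog, MetadataCatalog

# Register a hypothetical training set and attach its class names.
DatasetCatalog.register("my_dataset_train", lambda: [])      # loader stub for illustration
MetadataCatalog.get("my_dataset_train").thing_classes = ["cat", "dog"]

# With cfg.DATASETS.TRAIN = ("my_dataset_train",), the Visualizer call above
# labels each predicted box/mask with "cat" or "dog".
metadata = MetadataCatalog.get("my_dataset_train")
print(metadata.thing_classes)                                 # ['cat', 'dog']
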
Example #3
        flag = False
        # Look up each ground-truth annotation's class name from the metadata.
        for ann in dic['annotations']:
            category = metadata.get('thing_classes')[ann['category_id']]

        # Read the image with OpenCV (BGR) and flip to RGB for the Visualizer.
        img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
        basename = os.path.basename(dic["file_name"])

        # Draw the region proposals for this image above the confidence
        # threshold; the result is a list of visualized images.
        vis = Visualizer(img, metadata)
        vis_pred = vis.draw_proposals_separately(
            proposal_by_image[dic["image_id"]], img.shape[:2],
            args.conf_threshold)

        # Build an Instances object from the per-image predictions and draw it
        # on a fresh Visualizer.
        predictions = create_instances(pred_by_image[dic["image_id"]],
                                       img.shape[:2])
        vis = Visualizer(img, metadata)
        pred = vis.draw_instance_predictions(predictions).get_image()
        vis_pred.append(pred)

        # Draw the ground-truth annotations on another fresh Visualizer.
        vis = Visualizer(img, metadata)
        gt = vis.draw_dataset_dict(dic).get_image()

        # Tile all proposal/prediction images into one canvas, and place the
        # prediction next to the ground truth for a quick side-by-side view.
        concat = vis.smart_concatenate(vis_pred, min_side=1960)
        vis = np.concatenate([pred, gt], axis=1)

        if args.show:
            webcv2.imshow(basename + ' - Press D for details', vis[:, :, ::-1])
            key = webcv2.waitKey()
            if key == 100:  # 'd'
                webcv2.imshow(basename, concat[:, :, ::-1])
                webcv2.waitKey()
        else:
            cv2.imwrite(os.path.join(args.output, basename), vis[:, :, ::-1])
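
create_instances is not shown in this excerpt. A hedged sketch of what such a helper typically does with per-image, COCO-style prediction dicts; the field names follow detectron2's Instances conventions and the score threshold is illustrative:

import numpy as np
from detectron2.structures import Boxes, BoxMode, Instances

def create_instances(predictions, image_size, score_thresh=0.5):
    """Build an Instances object from COCO-style prediction dicts."""
    ret = Instances(image_size)

    scores = np.asarray([p["score"] for p in predictions])
    keep = scores > score_thresh

    boxes = np.asarray([p["bbox"] for p in predictions]).reshape(-1, 4)[keep]
    # COCO predictions store boxes as XYWH_ABS; the Visualizer expects XYXY_ABS.
    boxes = BoxMode.convert(boxes, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)

    ret.scores = scores[keep]
    ret.pred_boxes = Boxes(boxes)
    ret.pred_classes = np.asarray([p["category_id"] for p in predictions])[keep]
    return ret
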
Example #4
            if len(grouped_gt[range_name]) == 0:
                continue
            visualized = True

            vis = Visualizer(img, metadata, scale=scale)
            # topk_iou_boxes (project-specific) selects, per ground-truth box
            # (converted from XYWH_ABS to XYXY_ABS), the top-k predicted boxes by IoU.
            topk_boxes, topk_indices = vis.topk_iou_boxes(
                predictions.pred_boxes,
                Boxes([
                    BoxMode.convert(x["bbox"], BoxMode.XYWH_ABS,
                                    BoxMode.XYXY_ABS)
                    for x in grouped_gt[range_name]
                ]))
            topk_indices = topk_indices.reshape((-1, ))
            # Convert the indices to a Python list when indexing below, since
            # shape-(1,) tensors would otherwise be treated as scalars.
            vis.draw_dataset_dict({"annotations": grouped_gt[range_name]})
            vis_boxes = vis.draw_instance_predictions(
                predictions[topk_indices.tolist()])

            if args.show:
                webcv2.imshow(basename + "-boxes@" + range_name,
                              vis_boxes.get_image()[..., ::-1])
            else:
                save(vis_boxes.get_image()[..., ::-1], args.output, "boxes",
                     basename + "@%s.jpg" % range_name)

            vis_anchor = Visualizer(img, metadata)
            anchors = predictions.anchors.tensor[topk_indices]
            vis_anchor = vis_anchor.overlay_instances(
                boxes=anchors.reshape(-1, 4),
                labels=predictions.scores[topk_indices.reshape(-1).tolist()])

            if args.show:
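
For a self-contained reference, a minimal sketch of the stock Visualizer.overlay_instances call used above for the anchor overlay; the image path, boxes, and labels are placeholders:

import cv2
import numpy as np
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import Visualizer

img = cv2.imread("input.jpg")[:, :, ::-1]          # placeholder path; BGR -> RGB
metadata = MetadataCatalog.get("coco_2017_val")

# XYXY_ABS boxes with one label apiece (here: score strings, as in the example).
boxes = np.array([[50.0, 60.0, 200.0, 220.0],
                  [120.0, 40.0, 300.0, 260.0]], dtype=np.float32)
labels = ["0.91", "0.78"]

vis = Visualizer(img, metadata)
out = vis.overlay_instances(boxes=boxes, labels=labels)
cv2.imwrite("anchors.jpg", out.get_image()[:, :, ::-1])
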