예제 #1
0
 def build_train_loader(cls, cfg):
     """Build the training data loader for this trainer.

     It delegates to :func:`detectron2.data.build_detection_train_loader`,
     substituting a :class:`DatasetMapperWithBasis` (in training mode) so
     that categorical labels are added as a semantic mask.

     Returns:
         iterable: the detectron2 training data loader.
     """
     # Construct the customized mapper inline and hand it to detectron2.
     return build_detection_train_loader(
         cfg, mapper=DatasetMapperWithBasis(cfg, True)
     )
예제 #2
0
    def build_train_loader(cls, cfg):
        """Register the camouflage COCO datasets and build the train loader.

        Registers the train/test annotation files with detectron2's dataset
        catalog, then delegates to
        :func:`detectron2.data.build_detection_train_loader` with a
        :class:`DatasetMapperWithBasis` mapper (training mode), which adds
        categorical labels as a semantic mask.

        Returns:
            iterable: the detectron2 training data loader.
        """
        # Register both dataset splits; image root is left empty because the
        # annotation files carry the paths.
        dataset_splits = (
            ("cam-cv-1.0_Train", '../Images/annotations/noncamo_train.json'),
            ("cam-cv-1.0_Test", '../Images/annotations/noncamo_test.json'),
        )
        for split_name, json_file in dataset_splits:
            register_coco_instances(split_name, {}, json_file, '')

        custom_mapper = DatasetMapperWithBasis(cfg, True)
        return build_detection_train_loader(cfg, custom_mapper)
예제 #3
0
    os.makedirs(dirname, exist_ok=True)
    metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])

    def output(vis, fname):
        """Show the visualization in a window, or save it under `dirname`.

        When ``args.show`` is set, the image is displayed in an OpenCV
        window (converted RGB -> BGR) and execution blocks on a key press;
        otherwise it is written to ``dirname/fname``.
        """
        if args.show:
            print(fname)
            # OpenCV expects BGR channel order; reverse the last axis.
            cv2.imshow("window", vis.get_image()[:, :, ::-1])
            cv2.waitKey()
            return
        filepath = os.path.join(dirname, fname)
        print("Saving to {} ...".format(filepath))
        vis.save(filepath)

    scale = 2.0 if args.show else 1.0
    if args.source == "dataloader":
        mapper = DatasetMapperWithBasis(cfg, True)
        train_data_loader = build_detection_train_loader(cfg, mapper)
        for batch in train_data_loader:
            for per_image in batch:
                # Pytorch tensor is in (C, H, W) format
                img = per_image["image"].permute(1, 2, 0)
                if cfg.INPUT.FORMAT == "BGR":
                    img = img[:, :, [2, 1, 0]]
                else:
                    img = np.asarray(
                        Image.fromarray(img,
                                        mode=cfg.INPUT.FORMAT).convert("RGB"))

                visualizer = Visualizer(img, metadata=metadata, scale=scale)
                target_fields = per_image["instances"].get_fields()
                labels = [