Example #1
    def setUp(self):
        runner = GeneralizedRCNNRunner()
        self.cfg = runner.get_default_cfg()
        self.is_mcs = False
        self.setup_custom_test()

        # NOTE: change some config to make the model run fast
        self.cfg.merge_from_list(get_quick_test_config_opts())

        self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
        self.test_model = runner.build_model(self.cfg, eval_only=True)
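
This setUp reads as a base-class fixture, with setup_custom_test as the hook for subclasses. A minimal sketch of how a concrete test might plug in (the class names are illustrative, the import path is assumed, the config file is borrowed from Example #2, and the quick-test opts are omitted for brevity):

import unittest

from d2go.runner import GeneralizedRCNNRunner  # assumed import path


class RCNNTestBase(unittest.TestCase):
    """Illustrative stand-in for the base class that owns the setUp above."""

    def setup_custom_test(self):
        pass  # subclasses merge their model config here

    def setUp(self):
        runner = GeneralizedRCNNRunner()
        self.cfg = runner.get_default_cfg()
        self.setup_custom_test()
        self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
        self.test_model = runner.build_model(self.cfg, eval_only=True)


class TestFBNetV3AMaskRCNN(RCNNTestBase):  # hypothetical concrete test
    def setup_custom_test(self):
        # config file name borrowed from Example #2
        self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")

    def test_model_builds(self):
        self.assertIsNotNone(self.test_model)
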
Example #2
    def test_export_torchvision_format(self):
        runner = GeneralizedRCNNRunner()
        cfg = runner.get_default_cfg()
        cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
        cfg.merge_from_list(get_quick_test_config_opts())

        cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
        pytorch_model = runner.build_model(cfg, eval_only=True)

        from typing import Dict, List

        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model

            def forward(self, inputs: List[torch.Tensor]):
                x = inputs[0].unsqueeze(0) * 255  # CHW in [0, 1] -> NCHW in [0, 255]
                scale = 320.0 / min(x.shape[-2], x.shape[-1])  # resize shorter side to 320
                x = torch.nn.functional.interpolate(
                    x,
                    scale_factor=scale,
                    mode="bilinear",
                    align_corners=True,
                    recompute_scale_factor=True,
                )
                out = self.model(x[0])  # traced model takes a single CHW tensor
                res: Dict[str, torch.Tensor] = {}
                res["boxes"] = out[0] / scale  # map boxes back to input resolution
                res["labels"] = out[2]
                res["scores"] = out[1]
                return inputs, [res]  # torchvision-style (inputs, [detections])

        size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
        h, w = size_divisibility, size_divisibility * 2
        with create_detection_data_loader_on_toy_dataset(
                cfg, h, w, is_train=False) as data_loader:
            with make_temp_directory(
                    "test_export_torchvision_format") as tmp_dir:
                predictor_path = convert_and_export_predictor(
                    cfg,
                    copy.deepcopy(pytorch_model),
                    "torchscript",
                    tmp_dir,
                    data_loader,
                )

                orig_model = torch.jit.load(
                    os.path.join(predictor_path, "model.jit"))
                wrapped_model = Wrapper(orig_model)
                # run one forward pass to sanity-check the wrapper before scripting
                wrapped_model([torch.rand(3, 600, 600)])
                scripted_model = torch.jit.script(wrapped_model)
                scripted_model.save(os.path.join(tmp_dir, "new_file.pt"))
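
A hedged usage sketch for the artifact saved above: reload new_file.pt outside the test and run it on a dummy image (the path and image size are placeholders; the output layout follows Wrapper.forward):

import torch

# Reload the scripted wrapper (path is a placeholder) and feed it one dummy
# RGB image: a CHW float tensor in [0, 1], as Wrapper.forward expects.
loaded = torch.jit.load("new_file.pt")
inputs, detections = loaded([torch.rand(3, 480, 640)])
res = detections[0]
print(res["boxes"].shape)  # Nx4 boxes, rescaled back to the 480x640 input
print(res["scores"], res["labels"])
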
Example #3
#cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml"))
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml")

cfg.merge_from_file(
    r"/home/pi/Desktop/project xcv/torchscript_int8@tracing/config2.yml")
cfg.MODEL.WEIGHTS = r"/home/pi/Desktop/project xcv/torchscript_int8@tracing/data.pth"
cfg.MODEL.DEVICE = "cpu"  # or "cuda"
#cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml"))
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml")

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6  # set threshold for this model
#model = runner.build_model(cfg)
#predictor = DefaultPredictor(cfg)

model = runner.build_model(cfg)
#model = create_predictor(predictor_path)
predictor = DemoPredictor(model)

#cap=cv2.VideoCapture(0)
cap = cv2.VideoCapture("/home/pi/Desktop/project xcv/video-clip.mp4")
fps = cap.get(cv2.CAP_PROP_FPS)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # same as width above
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # same as height above
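
The capture setup is normally followed by a read-and-predict loop. A minimal sketch, assuming DemoPredictor mirrors the output dict of detectron2's DefaultPredictor (an "instances" key holding Instances):

# Minimal read-and-predict loop over the capture opened above (a sketch;
# visualization and output writing are omitted).
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break  # end of stream
    outputs = predictor(frame)  # predictors take BGR frames as read by OpenCV
    instances = outputs["instances"].to("cpu")
    print(f"{len(instances)} detections")
cap.release()
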
Example #4
class TestKmeansAnchors(unittest.TestCase):
    def setUp(self):
        self.runner = GeneralizedRCNNRunner()

    def _get_default_cfg(self):
        cfg = self.runner.get_default_cfg()
        add_kmeans_anchors_cfg(cfg)
        return cfg

    @unittest.skip("This can only run locally and takes a significant amount of time")
    def test_matching_previous_results(self):
        cfg = self._get_default_cfg()
        cfg.INPUT.MIN_SIZE_TRAIN = (144, )
        cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
        cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 10
        cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 512
        cfg.MODEL.KMEANS_ANCHORS.DATASETS = ()

        # NOTE: create a data loader that samples exactly the same way as the
        # previous implementation. In D2Go, we will rely on the train loader
        # instead.
        # NOTE: in order to load OV580_XRM dataset, change the IM_DIR to:
        # "/mnt/vol/gfsai-east/aml/mobile-vision//dataset/oculus/hand_tracking//torch/Segmentation/OV580_XRM_640x480_V3_new_rerun/images"  # noqa
        data_loader = build_sequence_loader(
            cfg,
            # dataset_name="coco_2014_valminusminival",
            # dataset_name="OV580_XRM_640x480_V3_train",
            dataset_name="OV580_XRM_640x480_V3_heldOut_small_512",
            mapper=self.runner.get_mapper(cfg, is_train=True),
            total_samples=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
            batch_size=3,
        )

        kmeans_anchors = compute_kmeans_anchors(cfg,
                                                data_loader,
                                                sort_by_area=False,
                                                _stride=16,
                                                _legacy_plus_one=True)

        # Taken from D9849940
        reference_anchors = np.array([
            [-15.33554182, -15.29361029, 31.33554182, 31.29361029],  # noqa
            [-9.34156693, -9.32553548, 25.34156693, 25.32553548],  # noqa
            [-6.03052776, -6.02034167, 22.03052776, 22.02034167],  # noqa
            [-2.25951741, -2.182888, 18.25951741, 18.182888],  # noqa
            [-18.93553378, -18.93553403, 34.93553378, 34.93553403],  # noqa
            [-12.69068356, -12.73989029, 28.69068356, 28.73989029],  # noqa
            [-24.73489189, -24.73489246, 40.73489189, 40.73489246],  # noqa
            [-4.06014466, -4.06014469, 20.06014466, 20.06014469],  # noqa
            [-7.61036119, -7.60467538, 23.61036119, 23.60467538],  # noqa
            [-10.88200579, -10.87634414, 26.88200579, 26.87634414],  # noqa
        ])
        np.testing.assert_allclose(kmeans_anchors,
                                   reference_anchors,
                                   atol=1e-6)

    def test_build_model(self):
        cfg = self._get_default_cfg()
        cfg.INPUT.MIN_SIZE_TRAIN = (60, )
        cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
        cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 3
        cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 5
        cfg.MODEL.KMEANS_ANCHORS.DATASETS = ("toy_dataset", )

        cfg.MODEL.DEVICE = "cpu"
        cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"

        with register_toy_coco_dataset(
                "toy_dataset",
                image_size=(80, 60),  # w, h
                num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
        ):
            model = self.runner.build_model(cfg)
            trainer = SimpleTrainer(model, data_loader=[], optimizer=None)
            trainer_hooks = [compute_kmeans_anchors_hook(self.runner, cfg)]
            trainer.register_hooks(trainer_hooks)
            trainer.before_train()
            anchor_generator = model.proposal_generator.anchor_generator
            cell_anchors = list(anchor_generator.cell_anchors)
            gt_anchors = np.array([
                [-20, -15, 20, 15]  # toy_dataset's bbox is half size of image
                for _ in range(cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS)
            ])
            np.testing.assert_allclose(cell_anchors[0], gt_anchors)
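
To see where the expected [-20, -15, 20, 15] row comes from: each toy box is half the 80x60 image on each side, i.e. 40x30, and a cell anchor centered at the origin in XYXY form is (-w/2, -h/2, w/2, h/2). A quick standalone check:

import numpy as np

# Each toy box is half the 80x60 (w, h) image: 40x30 pixels.
box_w, box_h = 80 / 2, 60 / 2
anchor = np.array([-box_w / 2, -box_h / 2, box_w / 2, box_h / 2])
print(anchor)  # [-20. -15.  20.  15.]
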
Example #5
#cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml"))
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml")

#cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml"))
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml")

cfg.merge_from_file(r"/content/config1.yml")
cfg.MODEL.WEIGHTS = "/content/output/model_final.pth"
cfg.MODEL.DEVICE = "cuda"

#cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml"))
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml")

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6  # set threshold for this model

model = runner.build_model(cfg)
predictor = DefaultPredictor(cfg)


#model = runner.build_model(cfg)
#model = create_predictor(predictor_path)
#predictor = DemoPredictor(model)


# cap = cv2.VideoCapture("/content/video.mp4")
cap = cv2.VideoCapture("/content/drive/MyDrive/d2go/VID_20210701_130909.mp4")
fps = cap.get(cv2.CAP_PROP_FPS)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
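
Downstream of this, processed frames are typically written back out. A sketch using cv2.VideoWriter with the properties read above (the output path and mp4v codec are assumptions):

# Write frames back out at the source resolution and frame rate.
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
writer = cv2.VideoWriter("/content/output.mp4", fourcc, fps, (width, height))
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # run the predictor here and write the (visualized) frame
    writer.write(frame)
writer.release()
cap.release()
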