Example #1
0
        def _test_export(self, predictor_type, compare_match=True):
            """Export ``self.test_model`` as ``predictor_type`` and smoke-test it.

            Runs the exported predictor on one batch from the eval data loader,
            validates the outputs, and (when ``compare_match`` is True) checks
            they match the eager-mode model's outputs.

            Returns the filesystem path of the exported predictor.
            """
            with self._create_data_loader(is_train=False) as data_loader:
                batch = next(iter(data_loader))

                # Export a deep copy: the export process may mutate the model
                # in place (known issue, TODO upstream).
                exported_path = convert_and_export_predictor(
                    self.cfg,
                    copy.deepcopy(self.test_model),
                    predictor_type,
                    self.test_dir,
                    data_loader,
                )

                exported_predictor = create_predictor(exported_path)
                exported_outputs = exported_predictor(batch)
                _validate_outputs(batch, exported_outputs)

                if compare_match:
                    with torch.no_grad():
                        eager_outputs = self.test_model(batch)

                    assert_instances_allclose(
                        exported_outputs[0]["instances"],
                        eager_outputs[0]["instances"],
                        size_as_tensor=True,
                    )

            return exported_path
Example #2
0
        def _test_export(self, predictor_type, compare_match=True):
            """Export ``self.test_model`` as ``predictor_type`` and smoke-test it.

            Builds a fake detection batch sized to the backbone's divisibility
            constraint, exports the model into a temp directory, validates the
            exported predictor's outputs, and (when ``compare_match`` is True)
            checks they match the eager-mode model's outputs.
            """
            # Input dims must respect the backbone's size divisibility; use at
            # least 10 so the fake image is not degenerate.
            size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
            h, w = size_divisibility, size_divisibility * 2
            with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
                inputs = next(iter(data_loader))

                with make_temp_directory(
                    "test_export_{}".format(predictor_type)
                ) as tmp_dir:
                    # TODO: the export may change the model itself, need to fix this
                    model_to_export = copy.deepcopy(self.test_model)
                    predictor_path = convert_and_export_predictor(
                        self.cfg, model_to_export, predictor_type, tmp_dir, data_loader
                    )

                    predictor = create_predictor(predictor_path)
                    # Fixed misspelled local: predicotr_outputs -> predictor_outputs.
                    predictor_outputs = predictor(inputs)
                    _validate_outputs(inputs, predictor_outputs)

                    if compare_match:
                        with torch.no_grad():
                            pytorch_outputs = self.test_model(inputs)

                        assert_instances_allclose(
                            predictor_outputs[0]["instances"],
                            pytorch_outputs[0]["instances"],
                        )
Example #3
0
    def _export_simple_model(self, cfg, model, data, output_dir,
                             predictor_type):
        """Export ``model`` via ``convert_and_export_predictor`` and load it back.

        Feeds the same ``data`` batch three times as a stand-in data loader,
        asserts the export produced a directory, and returns the re-loaded
        predictor.
        """
        # A tiny repeated-batch "data loader" is enough for the converter.
        fake_loader = iter([data] * 3)
        exported_dir = convert_and_export_predictor(
            cfg,
            model,
            predictor_type=predictor_type,
            output_dir=output_dir,
            data_loader=fake_loader,
        )
        self.assertTrue(os.path.isdir(exported_dir))

        # Loading the exported artifact must also work.
        return create_predictor(exported_dir)
Example #4
0
    def test_create_predictor(self):
        """End-to-end check of ``create_predictor`` on a hand-built predictor dir."""
        with make_temp_directory("test_model_info") as tmp_dir:
            # Describe a torchscript model plus a preprocess that scales by 2.
            model_path = os.path.join(tmp_dir, "model_A")
            info = PredictorInfo(
                model=ModelInfo(path=model_path, type="torchscript"),
                preprocess_info=FuncInfo.gen_func_info(TestPreprocess,
                                                       params={"weight": 2.0}),
            )

            # Simulate what an exporter would have written to disk.
            _save_test_model(model_path)
            info_file = os.path.join(tmp_dir, "predictor_info.json")
            with open(info_file, "w") as f:
                json.dump(info.to_dict(), f)

            predictor = create_predictor(tmp_dir)
            # y = (x * 2) + 1
            self.assertEqual(torch.tensor(5), predictor(torch.tensor(2)))
Example #5
0
        def _test_export(self, predictor_type, compare_match=True):
            """Export the model as ``predictor_type`` and verify its outputs.

            Runs the exported predictor on one batch, checks output counts,
            and (when ``compare_match`` is True) compares each "sem_seg"
            tensor against the eager-mode model's output.
            """
            h, w = _get_input_dim(self.test_model)
            data_loader = _get_data_loader(h, w, False)
            inputs = next(iter(data_loader))

            output_dir = os.path.join(self.test_dir, "test_export")
            predictor_path = convert_and_export_predictor(
                self.cfg, self.test_model, predictor_type, output_dir, data_loader)

            predictor = create_predictor(predictor_path)
            # Fixed misspelled local: predicotr_outputs -> predictor_outputs.
            predictor_outputs = predictor(inputs)
            self.assertEqual(len(predictor_outputs), len(inputs))

            with torch.no_grad():
                pytorch_outputs = self.test_model(inputs)
                self.assertEqual(len(pytorch_outputs), len(inputs))

            if compare_match:
                for predictor_output, pytorch_output in zip(
                        predictor_outputs, pytorch_outputs):
                    # NOTE(review): torch.testing.assert_allclose is deprecated
                    # in newer torch in favor of torch.testing.assert_close;
                    # kept as-is to match the torch version this file targets.
                    torch.testing.assert_allclose(predictor_output["sem_seg"],
                                                  pytorch_output["sem_seg"])
Example #6
0
def main(
    cfg,
    output_dir,
    runner,
    # binary specific optional arguments
    predictor_path,
    num_threads=None,
    caffe2_engine=None,
    caffe2_logging_print_net_summary=0,
):
    """Evaluate an exported predictor and return its metrics.

    Selects the quantized backend from the config, sets up the run
    environment, loads the predictor from ``predictor_path``, runs the
    runner's test loop, and prints a metrics table. The metrics are returned
    under both "accuracy" and "metrics" keys (kept for compatibility).
    """
    # Choose the quantized kernel backend before anything touches the model.
    torch.backends.quantized.engine = cfg.QUANTIZATION.BACKEND
    print("run with quantized engine: ", torch.backends.quantized.engine)

    setup_after_launch(cfg, output_dir, runner)
    caffe2_global_init(caffe2_logging_print_net_summary, num_threads)

    metrics = runner.do_test(cfg, create_predictor(predictor_path))
    print_metrics_table(metrics)
    return {"accuracy": metrics, "metrics": metrics}
Example #7
0
# Demo script (Colab-style): load an exported D2Go predictor and prepare a
# video capture for inference. Assumes `cfg`, `predictor_path`, `DemoPredictor`,
# `time`, `os`, and `cv2` are already in scope from earlier cells.
cfg.merge_from_file(r"/content/config2.yml")
# NOTE(review): weights point at an int8 torchscript export, but DEVICE is
# "cuda" — confirm the exported model actually runs on GPU.
cfg.MODEL.WEIGHTS =  os.path.join("/content/torchscript_int8@tracing/data.pth")
cfg.MODEL.DEVICE = "cuda"

#cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml"))
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml")

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6  # set threshold for this model

#model = runner.build_model(cfg)
#predictor = DefaultPredictor(cfg)

# Time the predictor load; the exported predictor replaces building the model
# from config.
start = time.time()
#model = runner.build_model(cfg)
model = create_predictor(predictor_path)
predictor = DemoPredictor(model)

#model = create_predictor(predictor_path)
#predictor = DemoPredictor(model)

# Open the input video and read its basic properties.
#cap=cv2.VideoCapture('/content/video.mp4')
cap=cv2.VideoCapture("/content/drive/MyDrive/d2go/VID_20210701_130909.mp4")
fps = cap.get(cv2.CAP_PROP_FPS)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# NOTE(review): indices 3/4 are CAP_PROP_FRAME_WIDTH/HEIGHT — these duplicate
# `width`/`height` above.
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))