Example #1
def setup(tmp_dir):
    ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
    runner = default_runner.Detectron2GoRunner()
    cfg = _get_cfg(runner, tmp_dir, ds_name)
    cfg.merge_from_list(
        [
            "MODEL.META_ARCHITECTURE", "MetaArchForTestQAT",
            "QUANTIZATION.QAT.ENABLED", "True",
            "QUANTIZATION.QAT.START_ITER", "0",
            "QUANTIZATION.QAT.ENABLE_OBSERVER_ITER", "0",
        ]
    )
    return runner, cfg
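
The helper above only prepares a runner and a QAT-enabled config. A test consuming it might look roughly like the sketch below; it reuses build_model / do_train exactly as the other examples in this listing do, and the method name test_qat_train is made up for illustration.

    def test_qat_train(self):
        # Minimal sketch assuming the setup() helper above; drives a short QAT training run.
        with tempfile.TemporaryDirectory() as tmp_dir:
            runner, cfg = setup(tmp_dir)
            model = runner.build_model(cfg)
            runner.do_train(cfg, model, resume=True)
            # As in Example #3, training should leave a final checkpoint in the output dir.
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "model_final.pth")))
            default_runner._close_all_tbx_writers()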
Example #2
    def test_d2go_runner_test(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
            runner = default_runner.Detectron2GoRunner()
            cfg = _get_cfg(runner, tmp_dir, ds_name)

            model = runner.build_model(cfg)
            results = runner.do_test(cfg, model)
            self.assertEqual(results["default"][ds_name]["bbox"]["AP"], 10.0)
            default_runner._close_all_tbx_writers()
Example #3
    def test_d2go_runner_train(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
            runner = default_runner.Detectron2GoRunner()
            cfg = _get_cfg(runner, tmp_dir, ds_name)

            model = runner.build_model(cfg)
            runner.do_train(cfg, model, resume=True)
            final_model_path = os.path.join(tmp_dir, "model_final.pth")
            self.assertTrue(os.path.isfile(final_model_path))
            default_runner._close_all_tbx_writers()
Example #4
    def test_build_model(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
            runner = oss_runner.DETRRunner()
            cfg = _get_cfg(runner, tmp_dir, ds_name)
            model = runner.build_model(cfg)
            dl = runner.build_detection_train_loader(cfg)
            batch = next(iter(dl))
            output = model(batch)
            self.assertIsInstance(output, dict)

            model.eval()
            output = model(batch)
            self.assertIsInstance(output, list)
            default_runner._close_all_tbx_writers()
Example #5
def setup(tmp_dir, backend, qat_method):
    ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
    runner = default_runner.Detectron2GoRunner()
    cfg = _get_cfg(runner, tmp_dir, ds_name)
    cfg.merge_from_list(
        [
            "MODEL.META_ARCHITECTURE", "MetaArchForTestQAT1",
            "QUANTIZATION.QAT.ENABLED", "True",
            "QUANTIZATION.QAT.START_ITER", "1",
            "QUANTIZATION.QAT.ENABLE_OBSERVER_ITER", "0",
            "QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER", "2",
            "QUANTIZATION.QAT.DISABLE_OBSERVER_ITER", "4",
            "QUANTIZATION.QAT.FREEZE_BN_ITER", "4",
            "QUANTIZATION.BACKEND", backend,
            "QUANTIZATION.QAT.FAKE_QUANT_METHOD", qat_method,
        ]
    )
    return runner, cfg
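
Since this variant of setup() takes backend and qat_method, it is presumably shared by parameterized QAT tests. A hedged sketch of such a driver follows; the concrete parameter values ("fbgemm" / "qnnpack" and "default" / "learnable") are assumptions, not taken from this listing.

    def test_qat_backends_sketch(self):
        # Sketch only: the backend and fake-quant-method values below are assumed.
        for backend in ["fbgemm", "qnnpack"]:
            for qat_method in ["default", "learnable"]:
                with tempfile.TemporaryDirectory() as tmp_dir:
                    runner, cfg = setup(tmp_dir, backend, qat_method)
                    model = runner.build_model(cfg)
                    runner.do_train(cfg, model, resume=True)
                    default_runner._close_all_tbx_writers()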
Example #6
    def test_d2go_runner_ema(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
            runner = default_runner.Detectron2GoRunner()
            cfg = _get_cfg(runner, tmp_dir, ds_name)
            cfg.MODEL.META_ARCHITECTURE = "MetaArchForTestSingleValue"
            cfg.MODEL_EMA.ENABLED = True
            cfg.MODEL_EMA.DECAY = 0.9

            def _run_train(cfg):
                cfg = copy.deepcopy(cfg)
                model = runner.build_model(cfg)
                model = DistributedDataParallel(model, broadcast_buffers=False)
                runner.do_train(cfg, model, True)
                final_model_path = os.path.join(tmp_dir, "model_final.pth")
                trained_weights = torch.load(final_model_path)
                self.assertIn("ema_state", trained_weights)
                default_runner._close_all_tbx_writers()
                return final_model_path, model.module.ema_state

            def _run_test(cfg, final_path, gt_ema):
                cfg = copy.deepcopy(cfg)
                cfg.MODEL.WEIGHTS = final_path
                model = runner.build_model(cfg, eval_only=True)
                self.assertGreater(len(model.ema_state.state), 0)
                self.assertEqual(len(model.ema_state.state), len(gt_ema.state))
                self.assertTrue(
                    _compare_state_dict(model.ema_state.state_dict(),
                                        gt_ema.state_dict()))
                results = runner.do_test(cfg, model)
                self.assertEqual(results["default"][ds_name]["bbox"]["AP"],
                                 3.0)
                self.assertEqual(results["ema"][ds_name]["bbox"]["AP"], 9.0)
                default_runner._close_all_tbx_writers()

            def _run_build_model_with_ema_weight(cfg, final_path, gt_ema):
                cfg = copy.deepcopy(cfg)
                cfg.MODEL.WEIGHTS = final_path
                cfg.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = True
                model = runner.build_model(cfg, eval_only=True)
                self.assertTrue(
                    _compare_state_dict(model.state_dict(),
                                        gt_ema.state_dict()))

            final_model_path, gt_ema = _run_train(cfg)
            _run_test(cfg, final_model_path, gt_ema)
            _run_build_model_with_ema_weight(cfg, final_model_path, gt_ema)
Example #7
    def test_d2go_build_evaluator(self):
        for rotated, evaluator in [
            (True, RotatedCOCOEvaluator),
            (False, COCOEvaluator),
        ]:
            with tempfile.TemporaryDirectory() as tmp_dir:
                ds_name = create_local_dataset(tmp_dir, 5, 10, 10, is_rotated=rotated)
                runner = default_runner.Detectron2GoRunner()
                cfg = _get_cfg(runner, tmp_dir, ds_name)

                ds_evaluators = runner.get_evaluator(cfg, ds_name, tmp_dir)
                self.assertTrue(
                    isinstance(ds_evaluators._evaluators[0], evaluator))
Example #8
    def test_d2go_runner_trainer_hooks(self):
        counts = 0

        @TRAINER_HOOKS_REGISTRY.register()
        def _check_hook_func(hooks):
            nonlocal counts
            counts = len(hooks)
            print(hooks)

        with tempfile.TemporaryDirectory() as tmp_dir:
            ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
            runner = default_runner.Detectron2GoRunner()
            cfg = _get_cfg(runner, tmp_dir, ds_name)
            model = runner.build_model(cfg)
            runner.do_train(cfg, model, resume=True)

            default_runner._close_all_tbx_writers()

        self.assertGreater(counts, 0)
Example #9
def create_detection_cfg(runner, output_dir):
    ds_name = create_local_dataset(output_dir, 5, 10, 10)
    cfg = runner.get_default_cfg()
    return get_det_meta_arch_cfg(cfg, ds_name, output_dir)
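
A hedged usage sketch for this helper, built only from names that already appear in these examples (Detectron2GoRunner, build_model); it is illustrative rather than taken from the source:

with tempfile.TemporaryDirectory() as tmp_dir:
    runner = default_runner.Detectron2GoRunner()
    cfg = create_detection_cfg(runner, tmp_dir)
    model = runner.build_model(cfg)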
Example #10
def maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self):
    with make_temp_directory("export_demo") as tmp_dir:
        # use a fake dataset for ci
        dataset_name = create_local_dataset(tmp_dir, 5, 224, 224)
        config_list = [
            "DATASETS.TRAIN",
            (dataset_name, ),
            "DATASETS.TEST",
            (dataset_name, ),
        ]
        # START_WIKI_EXAMPLE_TAG
        runner = GeneralizedRCNNRunner()
        cfg = runner.get_default_cfg()
        cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
        cfg.merge_from_list(get_quick_test_config_opts())
        cfg.merge_from_list(config_list)

        # equivalent to running:
        #   exporter.par --runner GeneralizedRCNNRunner --config-file config.yaml --predictor-types torchscript torchscript@c2_ops --output-dir tmp_dir
        _ = main(
            cfg,
            tmp_dir,
            runner,
            predictor_types=["torchscript@c2_ops", "torchscript"],
        )

        # the paths can be fetched from the return value of main; here we just use hard-coded values
        torchvision_ops_model = torch.jit.load(
            os.path.join(tmp_dir, "torchscript", "model.jit"))
        caffe2_ops_model = torch.jit.load(
            os.path.join(tmp_dir, "torchscript@c2_ops", "model.jit"))

        # Running inference using torchvision-style format
        image = torch.zeros(1, 64, 96)  # chw 3D tensor
        # The exported model can run on both cpu/gpu
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        torchvision_ops_model = torchvision_ops_model.to(device)
        torchvision_style_outputs = torchvision_ops_model(image)  # suppose N instances are detected
        # NOTE: the outputs are flattened tensors of the real output (which is a dict); they're
        # ordered by the dict keys, which is deterministic for the given model but can be
        # difficult to figure out from the model.jit file alone. The predictor_info.json in the
        # same directory contains the `outputs_schema`, which indicates how the final output
        # is constructed from the flattened tensors.
        (
            pred_boxes,  # torch.Size([N, 4])
            pred_classes,  # torch.Size([N])
            pred_masks,  # torch.Size([N, 1, Hmask, Wmask])
            scores,  # torch.Size([N])
            image_sizes,  # torch.Size([2])
        ) = torchvision_style_outputs
        self.assertTrue(
            all(x.device == torch.device(device)
                for x in torchvision_style_outputs[:4]),
            torchvision_style_outputs,
        )
        torch.testing.assert_close(image_sizes, torch.tensor([64, 96]))

        # Running inference using caffe2-style format
        data = torch.zeros(1, 1, 64, 96)
        im_info = torch.tensor([[64, 96, 1.0]])
        caffe2_style_outputs = caffe2_ops_model([data, im_info])
        # NOTE: the output order is determined by the order in which the tensors are created in
        # the forward function; it also follows the order of the original Caffe2 model.
        roi_bbox_nms = caffe2_style_outputs[0]  # torch.Size([N, 4])
        roi_score_nms = caffe2_style_outputs[1]  # torch.Size([N])
        roi_class_nms = caffe2_style_outputs[2]  # torch.Size([N])
        mask_fcn_probs = caffe2_style_outputs[3]  # torch.Size([N, Cmask, Hmask, Wmask])

        # relations between torchvision-style outputs and caffe2-style outputs
        torch.testing.assert_close(pred_boxes,
                                   roi_bbox_nms,
                                   check_device=False)
        torch.testing.assert_close(pred_classes,
                                   roi_class_nms.to(torch.int64),
                                   check_device=False)
        torch.testing.assert_close(
            pred_masks,
            mask_fcn_probs[:, roi_class_nms.to(torch.int64), :, :],
            check_device=False,
        )
        torch.testing.assert_close(scores, roi_score_nms, check_device=False)
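
        # Sketch: per the NOTE above, the predictor_info.json next to the exported model.jit
        # records the `outputs_schema` used to map the flattened tensors back to the original
        # dict output. The exact JSON layout is an assumption here; this only loads and prints
        # the schema for inspection.
        import json  # in real code this import would sit at the top of the file
        with open(os.path.join(tmp_dir, "torchscript", "predictor_info.json")) as f:
            predictor_info = json.load(f)
        print(predictor_info.get("outputs_schema"))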