Example 1
        def _test_export(self, predictor_type, compare_match=True):
            size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
            h, w = size_divisibility, size_divisibility * 2
            with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
                inputs = next(iter(data_loader))

                with make_temp_directory(
                    "test_export_{}".format(predictor_type)
                ) as tmp_dir:
                    # TODO: the export may change the model itself, need to fix this
                    model_to_export = copy.deepcopy(self.test_model)
                    predictor_path = convert_and_export_predictor(
                        self.cfg, model_to_export, predictor_type, tmp_dir, data_loader
                    )

                    predictor = create_predictor(predictor_path)
                    predictor_outputs = predictor(inputs)
                    _validate_outputs(inputs, predictor_outputs)

                    if compare_match:
                        with torch.no_grad():
                            pytorch_outputs = self.test_model(inputs)

                        assert_instances_allclose(
                            predictor_outputs[0]["instances"],
                            pytorch_outputs[0]["instances"],
                        )
Example 2
    def test_mask_head_scriptability(self):
        input_shape = ShapeSpec(channels=1024)
        mask_features = torch.randn(4, 1024, 14, 14)

        image_shapes = [(10, 10), (15, 15)]
        pred_instance0 = Instances(image_shapes[0])
        pred_classes0 = torch.tensor([1, 2, 3], dtype=torch.int64)
        pred_instance0.pred_classes = pred_classes0
        pred_instance1 = Instances(image_shapes[1])
        pred_classes1 = torch.tensor([4], dtype=torch.int64)
        pred_instance1.pred_classes = pred_classes1

        mask_head = MaskRCNNConvUpsampleHead(input_shape,
                                             num_classes=80,
                                             conv_dims=[256, 256]).eval()
        # the pred_instance list is modified in place during inference
        # by `MaskRCNNConvUpsampleHead`
        origin_outputs = mask_head(mask_features,
                                   deepcopy([pred_instance0, pred_instance1]))

        fields = {"pred_masks": torch.Tensor, "pred_classes": torch.Tensor}
        with freeze_training_mode(mask_head), patch_instances(
                fields) as NewInstances:
            script_mask_head = torch.jit.script(mask_head)
            pred_instance0 = NewInstances.from_instances(pred_instance0)
            pred_instance1 = NewInstances.from_instances(pred_instance1)
            script_outputs = script_mask_head(
                mask_features, [pred_instance0, pred_instance1])

        for origin_ins, script_ins in zip(origin_outputs, script_outputs):
            assert_instances_allclose(origin_ins,
                                      script_ins.to_instances(),
                                      rtol=0)
Example 3
    def test_keypoint_head_scriptability(self):
        input_shape = ShapeSpec(channels=1024, height=14, width=14)
        keypoint_features = torch.randn(4, 1024, 14, 14)

        image_shapes = [(10, 10), (15, 15)]
        pred_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6], [1, 5, 2, 8]], dtype=torch.float32)
        pred_instance0 = Instances(image_shapes[0])
        pred_instance0.pred_boxes = Boxes(pred_boxes0)
        pred_boxes1 = torch.tensor([[7, 3, 10, 5]], dtype=torch.float32)
        pred_instance1 = Instances(image_shapes[1])
        pred_instance1.pred_boxes = Boxes(pred_boxes1)

        keypoint_head = KRCNNConvDeconvUpsampleHead(
            input_shape, num_keypoints=17, conv_dims=[512, 512]
        ).eval()
        origin_outputs = keypoint_head(
            keypoint_features, deepcopy([pred_instance0, pred_instance1])
        )

        fields = {
            "pred_boxes": Boxes,
            "pred_keypoints": torch.Tensor,
            "pred_keypoint_heatmaps": torch.Tensor,
        }
        with freeze_training_mode(keypoint_head), patch_instances(fields) as NewInstances:
            script_keypoint_head = torch.jit.script(keypoint_head)
            pred_instance0 = NewInstances.from_instances(pred_instance0)
            pred_instance1 = NewInstances.from_instances(pred_instance1)
            script_outputs = script_keypoint_head(
                keypoint_features, [pred_instance0, pred_instance1]
            )

        for origin_ins, script_ins in zip(origin_outputs, script_outputs):
            assert_instances_allclose(origin_ins, script_ins.to_instances(), rtol=0)
Example 4
    def _test_model(self, config_path, inference_func, batch=1):
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()
        inputs = tuple(image.clone() for _ in range(batch))

        wrapper = TracingAdapter(model, inputs, inference_func)
        wrapper.eval()
        with torch.no_grad():
            # trace with smaller images, and the trace must still work
            trace_inputs = tuple(
                nn.functional.interpolate(
                    image, scale_factor=random.uniform(0.5, 0.7))
                for _ in range(batch))
            traced_model = torch.jit.trace(wrapper, trace_inputs)

            outputs = inference_func(model, *inputs)
            traced_outputs = wrapper.outputs_schema(traced_model(*inputs))
        if batch > 1:
            for output, traced_output in zip(outputs, traced_outputs):
                assert_instances_allclose(output,
                                          traced_output,
                                          size_as_tensor=True)
        else:
            assert_instances_allclose(outputs,
                                      traced_outputs,
                                      size_as_tensor=True)
Example 5
    def _test_model(self, config_path, inference_func):
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()

        class Wrapper(nn.ModuleList):  # a wrapper to make the model traceable
            def forward(self, image):
                outputs = inference_func(self[0], image)
                flattened_outputs, schema = flatten_to_tuple(outputs)
                if not hasattr(self, "schema"):
                    self.schema = schema
                return flattened_outputs

            def rebuild(self, flattened_outputs):
                return self.schema(flattened_outputs)

        wrapper = Wrapper([model])
        wrapper.eval()
        with torch.no_grad(), patch_builtin_len():
            small_image = nn.functional.interpolate(image, scale_factor=0.5)
            # trace with a different image, and the trace must still work
            traced_model = torch.jit.trace(wrapper, (small_image,))

            output = inference_func(model, image)
            traced_output = wrapper.rebuild(traced_model(image))
        assert_instances_allclose(output, traced_output, size_as_tensor=True)
Example 6
        def _test_export(self, predictor_type, compare_match=True):
            with self._create_data_loader(is_train=False) as data_loader:
                inputs = next(iter(data_loader))

                # TODO: the export may change the model itself, need to fix this
                model_to_export = copy.deepcopy(self.test_model)
                predictor_path = convert_and_export_predictor(
                    self.cfg,
                    model_to_export,
                    predictor_type,
                    self.test_dir,
                    data_loader,
                )

                predictor = create_predictor(predictor_path)
                predictor_outputs = predictor(inputs)
                _validate_outputs(inputs, predictor_outputs)

                if compare_match:
                    with torch.no_grad():
                        pytorch_outputs = self.test_model(inputs)

                    assert_instances_allclose(
                        predictor_outputs[0]["instances"],
                        pytorch_outputs[0]["instances"],
                        size_as_tensor=True,
                    )

            return predictor_path
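Condensed, the export round trip of Examples 1 and 6 is: deep-copy the model (export may mutate it), convert, reload, run, and compare against eager PyTorch. A hedged sketch; the import paths below are assumptions about the d2go / mobile_cv layout and should be verified:

    import copy
    import torch
    # Assumed import locations for the helpers used above; verify them
    # against your d2go / mobile_cv installation.
    from d2go.export.api import convert_and_export_predictor
    from mobile_cv.predictor.api import create_predictor
    from detectron2.utils.testing import assert_instances_allclose

    def export_and_check(cfg, model, predictor_type, out_dir, data_loader, inputs):
        model_to_export = copy.deepcopy(model)  # export may mutate the model
        path = convert_and_export_predictor(
            cfg, model_to_export, predictor_type, out_dir, data_loader
        )
        predictor = create_predictor(path)  # load the exported predictor
        outputs = predictor(inputs)
        with torch.no_grad():
            reference = model(inputs)  # eager PyTorch reference
        assert_instances_allclose(
            outputs[0]["instances"], reference[0]["instances"], size_as_tensor=True
        )
        return path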
Example 7
    def test_StandardROIHeads_scriptability(self):
        cfg = get_cfg()
        cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
        cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
        cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
        cfg.MODEL.MASK_ON = True
        cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.01
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.01
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        num_channels = 1024
        features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
        feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)}

        roi_heads = StandardROIHeads(cfg, feature_shape).eval()

        proposal0 = Instances(image_sizes[0])
        proposal_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]],
                                       dtype=torch.float32)
        proposal0.proposal_boxes = Boxes(proposal_boxes0)
        proposal0.objectness_logits = torch.tensor([0.5, 0.7],
                                                   dtype=torch.float32)

        proposal1 = Instances(image_sizes[1])
        proposal_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]],
                                       dtype=torch.float32)
        proposal1.proposal_boxes = Boxes(proposal_boxes1)
        proposal1.objectness_logits = torch.tensor([0.1, 0.9],
                                                   dtype=torch.float32)
        proposals = [proposal0, proposal1]

        pred_instances, _ = roi_heads(images, features, proposals)
        fields = {
            "objectness_logits": torch.Tensor,
            "proposal_boxes": Boxes,
            "pred_classes": torch.Tensor,
            "scores": torch.Tensor,
            "pred_masks": torch.Tensor,
            "pred_boxes": Boxes,
            "pred_keypoints": torch.Tensor,
            "pred_keypoint_heatmaps": torch.Tensor,
        }
        with freeze_training_mode(roi_heads), patch_instances(
                fields) as new_instances:
            proposal0 = new_instances.from_instances(proposal0)
            proposal1 = new_instances.from_instances(proposal1)
            proposals = [proposal0, proposal1]
            scripted_roi_heads = torch.jit.script(roi_heads)
            scripted_pred_instances, _ = scripted_roi_heads(
                images, features, proposals)

        for instance, scripted_instance in zip(pred_instances,
                                               scripted_pred_instances):
            assert_instances_allclose(instance,
                                      scripted_instance.to_instances(),
                                      rtol=0)
Example 8
    def test_flatten_instances_boxes(self):
        inst = Instances(
            torch.tensor([5, 8]), pred_masks=torch.tensor([3]), pred_boxes=Boxes(torch.ones((1, 4)))
        )
        obj = [3, ([5, 6], inst)]
        res, schema = flatten_to_tuple(obj)
        self.assertEqual(res[:3], (3, 5, 6))
        for r, expected in zip(res[3:], (inst.pred_boxes.tensor, inst.pred_masks, inst.image_size)):
            self.assertIs(r, expected)
        new_obj = schema(res)
        assert_instances_allclose(new_obj[1][1], inst, rtol=0.0, size_as_tensor=True)
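For context, flatten_to_tuple() decomposes an arbitrarily nested Python object into a flat tuple of leaves plus a schema that rebuilds the original layout, which is what lets the test above assert identity on the leaves. A minimal round-trip sketch, assuming the function lives in detectron2.export.flatten as in recent detectron2:

    import torch
    # Assumed import location; in recent detectron2 this is
    # detectron2/export/flatten.py.
    from detectron2.export.flatten import flatten_to_tuple

    obj = {"a": torch.ones(2), "b": [1, (2, 3)]}
    flat, schema = flatten_to_tuple(obj)  # flat tuple of leaves + rebuild schema
    rebuilt = schema(flat)                # reconstructs the nested layout
    assert rebuilt["b"][1] == (2, 3)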
Example 9
    def _test_model(self, config_path, inference_func):
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()

        wrapper = TracingAdapter(model, image, inference_func)
        wrapper.eval()
        with torch.no_grad():
            small_image = nn.functional.interpolate(image, scale_factor=0.5)
            # trace with a different image, and the trace must still work
            traced_model = torch.jit.trace(wrapper, (small_image,))

            output = inference_func(model, image)
            traced_output = wrapper.outputs_schema(traced_model(image))
        assert_instances_allclose(output, traced_output, size_as_tensor=True)
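TracingAdapter automates the flattening that the hand-written wrappers in Examples 5 and 10 perform manually: it turns structured detectron2 inputs and outputs into tensor tuples that torch.jit.trace can digest, and records an outputs_schema on the first forward pass. A minimal sketch (attribute names as in current detectron2; treat it as illustrative):

    import torch
    from detectron2.export import TracingAdapter

    def trace_with_adapter(model, sample_inputs, inference_func):
        # The adapter flattens the inputs at construction time and exposes
        # them as `flattened_inputs`; `outputs_schema` is recorded on the
        # first forward and rebuilds structured outputs from flat tensors.
        adapter = TracingAdapter(model, sample_inputs, inference_func).eval()
        with torch.no_grad():
            traced = torch.jit.trace(adapter, adapter.flattened_inputs)
        return traced, adapter.outputs_schema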
Example 10
    def _test_model(self, config_path, WrapperCls):
        # TODO wrapper should be handled by export API in the future
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()

        model = WrapperCls([model])
        model.eval()
        with torch.no_grad(), patch_builtin_len():
            small_image = nn.functional.interpolate(image, scale_factor=0.5)
            # trace with a different image, and the trace must still work
            traced_model = torch.jit.trace(model, (small_image,))

            output = WrapperCls.convert_output(model(image))
            traced_output = WrapperCls.convert_output(traced_model(image))
        assert_instances_allclose(output, traced_output)
Example 11
    def _test_retinanet_model(self, config_path):
        model = model_zoo.get(config_path, trained=True)
        model.eval()

        fields = {
            "pred_boxes": Boxes,
            "scores": Tensor,
            "pred_classes": Tensor,
        }
        script_model = export_torchscript_with_instances(model, fields)

        img = get_sample_coco_image()
        inputs = [{"image": img}]
        with torch.no_grad():
            instance = model(inputs)[0]["instances"]
            scripted_instance = convert_scripted_instances(script_model(inputs)[0])
            scripted_instance = detector_postprocess(scripted_instance, img.shape[1], img.shape[2])
        assert_instances_allclose(instance, scripted_instance)
Example 12
    def _test_rcnn_model(self, config_path):
        model = model_zoo.get(config_path, trained=True)
        model.eval()

        fields = {
            "proposal_boxes": Boxes,
            "objectness_logits": Tensor,
            "pred_boxes": Boxes,
            "scores": Tensor,
            "pred_classes": Tensor,
            "pred_masks": Tensor,
        }
        script_model = export_torchscript_with_instances(model, fields)

        inputs = [{"image": get_sample_coco_image()}]
        with torch.no_grad():
            instance = model.inference(inputs, do_postprocess=False)[0]
            scripted_instance = script_model.inference(inputs, do_postprocess=False)[0]
        assert_instances_allclose(instance, scripted_instance)
Example 13
    def _test_rcnn_model(self, config_path):
        model = model_zoo.get(config_path, trained=True)
        model.eval()

        fields = {
            "proposal_boxes": Boxes,
            "objectness_logits": Tensor,
            "pred_boxes": Boxes,
            "scores": Tensor,
            "pred_classes": Tensor,
            "pred_masks": Tensor,
        }
        script_model = scripting_with_instances(model, fields)

        # Test that batch inference with images of different shapes is supported
        image = get_sample_coco_image()
        small_image = nn.functional.interpolate(image, scale_factor=0.5)
        inputs = [{"image": image}, {"image": small_image}]
        with torch.no_grad():
            instance = model.inference(inputs, do_postprocess=False)[0]
            scripted_instance = script_model.inference(inputs, do_postprocess=False)[0]
        assert_instances_allclose(instance, scripted_instance)
Example 14
    def _test_model(self, config_path, inference_func, batch=1):
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()
        inputs = tuple(image.clone() for _ in range(batch))

        wrapper = TracingAdapter(model, inputs, inference_func)
        wrapper.eval()
        with torch.no_grad():
            # trace with smaller images, and the trace must still work
            trace_inputs = tuple(
                nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
                for _ in range(batch)
            )
            traced_model = torch.jit.trace(wrapper, trace_inputs)

        testing_devices = self._get_device_casting_test_cases(model)
        # save and load the model back so that TorchScript errors show a proper traceback
        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
            basename = "model"
            jitfile = f"{d}/{basename}.jit"
            torch.jit.save(traced_model, jitfile)
            traced_model = torch.jit.load(jitfile)

            if any(device and "cuda" in device for device in testing_devices):
                self._check_torchscript_no_hardcoded_device(jitfile, d, "cuda")

        for device in testing_devices:
            print(f"Testing casting to {device} for inference (traced on {model.device}) ...")
            with torch.no_grad():
                outputs = inference_func(copy.deepcopy(model).to(device), *inputs)
                traced_outputs = wrapper.outputs_schema(traced_model.to(device)(*inputs))
            if batch > 1:
                for output, traced_output in zip(outputs, traced_outputs):
                    assert_instances_allclose(output, traced_output, size_as_tensor=True)
            else:
                assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
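The private _check_torchscript_no_hardcoded_device helper is not shown above; a rough, hypothetical equivalent relies on the fact that a TorchScript archive is a zip whose code/ entries contain the serialized sources:

    import zipfile

    def check_no_hardcoded_device(jitfile, device_substr="cuda"):
        # Scan the serialized TorchScript sources for a hardcoded device
        # string; a trace that bakes in "cuda" cannot be cast back to CPU.
        with zipfile.ZipFile(jitfile) as zf:
            for name in zf.namelist():
                if "/code/" in name:
                    src = zf.read(name).decode("utf-8", errors="ignore")
                    assert device_substr not in src, f"{device_substr!r} in {name}"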