Example #1
    def _test_model(self, config_path, inference_func, batch=1):
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()
        inputs = tuple(image.clone() for _ in range(batch))

        wrapper = TracingAdapter(model, inputs, inference_func)
        wrapper.eval()
        with torch.no_grad():
            # trace with smaller images, and the trace must still work
            trace_inputs = tuple(
                nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
                for _ in range(batch)
            )
            traced_model = torch.jit.trace(wrapper, trace_inputs)

            outputs = inference_func(model, *inputs)
            traced_outputs = wrapper.outputs_schema(traced_model(*inputs))
        if batch > 1:
            for output, traced_output in zip(outputs, traced_outputs):
                assert_instances_allclose(output, traced_output, size_as_tensor=True)
        else:
            assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
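
Example #1 above traces a batched TracingAdapter wrapper on shrunken copies of the input and checks that the trace still matches eager inference at the original size. The same pattern works outside a test class; a minimal sketch, assuming a COCO-trained GeneralizedRCNN config from the model zoo and a random stand-in image (the real tests use get_sample_coco_image):

    import torch
    from detectron2 import model_zoo
    from detectron2.export import TracingAdapter

    def inference_func(model, image):
        # GeneralizedRCNN.inference accepts a list of input dicts and can skip
        # postprocessing, which keeps the traced graph tensor-only
        inputs = [{"image": image}]
        return model.inference(inputs, do_postprocess=False)[0]

    model = model_zoo.get("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml", trained=True)
    image = torch.rand(3, 480, 640) * 255  # stand-in for a real CHW image tensor
    wrapper = TracingAdapter(model, image, inference_func)
    wrapper.eval()
    with torch.no_grad():
        traced = torch.jit.trace(wrapper, (image,))
        flat = traced(image)                      # a flat tuple of tensors
        instances = wrapper.outputs_schema(flat)  # rebuilt structured outputs
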
Example #2
    def _test_model(self, config_path, inference_func):
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()

        wrapper = TracingAdapter(model, image, inference_func)
        wrapper.eval()
        with torch.no_grad():
            small_image = nn.functional.interpolate(image, scale_factor=0.5)
            # trace with a different image, and the trace must still work
            traced_model = torch.jit.trace(wrapper, (small_image,))

            output = inference_func(model, image)
            traced_output = wrapper.outputs_schema(traced_model(image))
        assert_instances_allclose(output, traced_output, size_as_tensor=True)
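
Example #2 is the single-image variant: tracing on a half-size image and then running the trace on the full-size one verifies that no input shapes were baked into the graph. Note that outputs_schema lives on the Python-side wrapper, not in the TorchScript file, so a deployment must keep it (or a serialized form of it, as Example #4 does) alongside the saved model. A minimal persistence sketch, assuming traced_model, wrapper, and image from the example above:

    import torch

    # the .jit file stores only the flat tensor-in/tensor-out graph
    torch.jit.save(traced_model, "model.jit")
    reloaded = torch.jit.load("model.jit")
    with torch.no_grad():
        # outputs_schema from the original wrapper rebuilds structured outputs
        instances = wrapper.outputs_schema(reloaded(image))
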
Example #3
    def _test_model(self, config_path, inference_func, batch=1):
        model = model_zoo.get(config_path, trained=True)
        image = get_sample_coco_image()
        inputs = tuple(image.clone() for _ in range(batch))

        wrapper = TracingAdapter(model, inputs, inference_func)
        wrapper.eval()
        with torch.no_grad():
            # trace with smaller images, and the trace must still work
            trace_inputs = tuple(
                nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
                for _ in range(batch)
            )
            traced_model = torch.jit.trace(wrapper, trace_inputs)

        testing_devices = self._get_device_casting_test_cases(model)
        # save and load back the model in order to show traceback of TorchScript
        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
            basename = "model"
            jitfile = f"{d}/{basename}.jit"
            torch.jit.save(traced_model, jitfile)
            traced_model = torch.jit.load(jitfile)

            if any(device and "cuda" in device for device in testing_devices):
                self._check_torchscript_no_hardcoded_device(jitfile, d, "cuda")

        for device in testing_devices:
            print(f"Testing casting to {device} for inference (traced on {model.device}) ...")
            with torch.no_grad():
                outputs = inference_func(copy.deepcopy(model).to(device), *inputs)
                traced_outputs = wrapper.outputs_schema(traced_model.to(device)(*inputs))
            if batch > 1:
                for output, traced_output in zip(outputs, traced_outputs):
                    assert_instances_allclose(output, traced_output, size_as_tensor=True)
            else:
                assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
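
Example #3 adds a save/load round trip and device casting: the loaded trace is moved with .to(device) and must match eager inference of a model copy on that device. The _check_torchscript_no_hardcoded_device helper is not shown above; a hedged sketch of what such a check might look like (an assumption about its intent, not the actual implementation):

    import torch

    def check_no_hardcoded_device(jitfile, device_str):
        # load on CPU and scan the inlined graph for literal device strings;
        # a trace that baked in e.g. "cuda:0" would not cast cleanly
        module = torch.jit.load(jitfile, map_location="cpu")
        assert device_str not in str(module.inlined_graph), (
            f"traced graph contains a hardcoded '{device_str}' device")
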
Example #4
def d2_meta_arch_prepare_for_export(self, cfg, inputs, predictor_type):

    if "torchscript" in predictor_type and "@tracing" in predictor_type:

        def inference_func(model, image):
            inputs = [{"image": image}]
            return model.inference(inputs, do_postprocess=False)[0]

        def data_generator(x):
            return (x[0]["image"],)

        image = data_generator(inputs)[0]
        wrapper = TracingAdapter(self, image, inference_func)
        wrapper.eval()

        # HACK: outputs_schema can only be obtained after running tracing, but
        # PredictorExportConfig requires a pre-defined postprocessing function;
        # as a result, tracing has to run twice.
        logger.info("tracing the model to get outputs_schema ...")
        with torch.no_grad(), patch_builtin_len():
            _ = torch.jit.trace(wrapper, (image,))
        outputs_schema_json = json.dumps(
            wrapper.outputs_schema, default=dataclass_object_dump
        )

        return PredictorExportConfig(
            model=wrapper,
            data_generator=data_generator,
            preprocess_info=FuncInfo.gen_func_info(
                D2TracingAdapterPreprocessFunc, params={}
            ),
            postprocess_info=FuncInfo.gen_func_info(
                D2TracingAdapterPostFunc,
                params={"outputs_schema_json": outputs_schema_json},
            ),
        )

    if cfg.MODEL.META_ARCHITECTURE in META_ARCH_CAFFE2_EXPORT_TYPE_MAP:
        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
        c2_compatible_model = C2MetaArch(cfg, self)

        preprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPreprocessFunc,
            params=D2Caffe2MetaArchPreprocessFunc.get_params(cfg, c2_compatible_model),
        )
        postprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPostprocessFunc,
            params=D2Caffe2MetaArchPostprocessFunc.get_params(cfg, c2_compatible_model),
        )

        preprocess_func = preprocess_info.instantiate()

        return PredictorExportConfig(
            model=c2_compatible_model,
            # Caffe2MetaArch takes a single tuple as input (the return value of
            # preprocess_func); data_generator requires all positional args as a tuple.
            data_generator=lambda x: (preprocess_func(x),),
            preprocess_info=preprocess_info,
            postprocess_info=postprocess_info,
        )

    raise NotImplementedError("Can't determine prepare_for_tracing!")
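
Example #4 wires TracingAdapter into an export pipeline. The torchscript@tracing branch traces once just to obtain outputs_schema, serializes it to JSON so the postprocess function can be rebuilt at load time without re-tracing, and returns a PredictorExportConfig; the second branch falls back to a Caffe2-compatible meta architecture. Since the function takes the model as its first argument (self), it can also be called directly; a hypothetical call site (model, cfg, and inputs are assumptions: a detectron2 meta arch, its config, and a list of {"image": tensor} dicts):

    export_config = d2_meta_arch_prepare_for_export(
        model, cfg, inputs, predictor_type="torchscript@tracing"
    )
    traceable = export_config.model                    # the TracingAdapter wrapper
    trace_args = export_config.data_generator(inputs)  # (image_tensor,)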