def _test_model(self, config_path, inference_func, batch=1):
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()
    inputs = tuple(image.clone() for _ in range(batch))

    wrapper = TracingAdapter(model, inputs, inference_func)
    wrapper.eval()
    with torch.no_grad():
        # trace with smaller images, and the trace must still work
        trace_inputs = tuple(
            nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
            for _ in range(batch)
        )
        traced_model = torch.jit.trace(wrapper, trace_inputs)

        outputs = inference_func(model, *inputs)
        traced_outputs = wrapper.outputs_schema(traced_model(*inputs))
    if batch > 1:
        for output, traced_output in zip(outputs, traced_outputs):
            assert_instances_allclose(output, traced_output, size_as_tensor=True)
    else:
        assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
def _test_model(self, config_path, inference_func):
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()

    class Wrapper(nn.ModuleList):
        # a wrapper to make the model traceable
        def forward(self, image):
            outputs = inference_func(self[0], image)
            flattened_outputs, schema = flatten_to_tuple(outputs)
            if not hasattr(self, "schema"):
                self.schema = schema
            return flattened_outputs

        def rebuild(self, flattened_outputs):
            return self.schema(flattened_outputs)

    wrapper = Wrapper([model])
    wrapper.eval()
    with torch.no_grad(), patch_builtin_len():
        small_image = nn.functional.interpolate(image, scale_factor=0.5)
        # trace with a different image, and the trace must still work
        traced_model = torch.jit.trace(wrapper, (small_image,))

        output = inference_func(model, image)
        traced_output = wrapper.rebuild(traced_model(image))
    assert_instances_allclose(output, traced_output, size_as_tensor=True)
def _test_model(self, config_path, inference_func):
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()

    wrapper = TracingAdapter(model, image, inference_func)
    wrapper.eval()
    with torch.no_grad():
        small_image = nn.functional.interpolate(image, scale_factor=0.5)
        # trace with a different image, and the trace must still work
        traced_model = torch.jit.trace(wrapper, (small_image,))

        output = inference_func(model, image)
        traced_output = wrapper.outputs_schema(traced_model(image))
    assert_instances_allclose(output, traced_output, size_as_tensor=True)
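# Hedged usage sketch (not part of the original tests): one way a test case might
# drive the TracingAdapter-based helper above. The config path and the name
# test_mask_rcnn_tracing_example are illustrative assumptions; any model-zoo config
# and any callable with the signature inference_func(model, *inputs) should work
# the same way.
def test_mask_rcnn_tracing_example(self):
    def inference_func(model, image):
        # GeneralizedRCNN.inference returns one Instances object per input image
        inputs = [{"image": image}]
        return model.inference(inputs, do_postprocess=False)[0]

    self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func)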
def _test_model(self, config_path, WrapperCls):
    # TODO wrapper should be handled by export API in the future
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()

    model = WrapperCls([model])
    model.eval()
    with torch.no_grad(), patch_builtin_len():
        small_image = nn.functional.interpolate(image, scale_factor=0.5)
        # trace with a different image, and the trace must still work
        traced_model = torch.jit.trace(model, (small_image,))

        output = WrapperCls.convert_output(model(image))
        traced_output = WrapperCls.convert_output(traced_model(image))
    assert_instances_allclose(output, traced_output)
def _test_retinanet_model(self, config_path):
    model = model_zoo.get(config_path, trained=True)
    model.eval()

    fields = {
        "pred_boxes": Boxes,
        "scores": Tensor,
        "pred_classes": Tensor,
    }
    script_model = export_torchscript_with_instances(model, fields)

    img = get_sample_coco_image()
    inputs = [{"image": img}]
    with torch.no_grad():
        instance = model(inputs)[0]["instances"]
        scripted_instance = convert_scripted_instances(script_model(inputs)[0])
        scripted_instance = detector_postprocess(scripted_instance, img.shape[1], img.shape[2])
    assert_instances_allclose(instance, scripted_instance)
def _test_rcnn_model(self, config_path):
    model = model_zoo.get(config_path, trained=True)
    model.eval()

    fields = {
        "proposal_boxes": Boxes,
        "objectness_logits": Tensor,
        "pred_boxes": Boxes,
        "scores": Tensor,
        "pred_classes": Tensor,
        "pred_masks": Tensor,
    }
    script_model = export_torchscript_with_instances(model, fields)

    inputs = [{"image": get_sample_coco_image()}]
    with torch.no_grad():
        instance = model.inference(inputs, do_postprocess=False)[0]
        scripted_instance = script_model.inference(inputs, do_postprocess=False)[0]
    assert_instances_allclose(instance, scripted_instance)
def _test_model(self, config_path, device="cpu"):
    # requires extra dependencies
    from detectron2.export import Caffe2Model, add_export_config, Caffe2Tracer

    cfg = model_zoo.get_config(config_path)
    add_export_config(cfg)
    cfg.MODEL.DEVICE = device
    model = model_zoo.get(config_path, trained=True, device=device)

    inputs = [{"image": get_sample_coco_image()}]
    c2_model = Caffe2Tracer(cfg, model, copy.deepcopy(inputs)).export_caffe2()

    with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
        c2_model.save_protobuf(d)
        c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs))
        c2_model = Caffe2Model.load_protobuf(d)
        c2_model(inputs)[0]["instances"]
def _test_model(self, config_path, device="cpu"):
    cfg = model_zoo.get_config(config_path)
    cfg.MODEL.DEVICE = device
    model = model_zoo.get(config_path, trained=True, device=device)

    inputs = [{"image": get_sample_coco_image()}]
    tracer = Caffe2Tracer(cfg, model, copy.deepcopy(inputs))

    with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
        if not os.environ.get("CI"):
            # This requires onnx, which is not yet available on public CI
            c2_model = tracer.export_caffe2()
            c2_model.save_protobuf(d)
            c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs))

            c2_model = Caffe2Model.load_protobuf(d)
            c2_model(inputs)[0]["instances"]

        ts_model = tracer.export_torchscript()
        ts_model.save(os.path.join(d, "model.ts"))
def _test_rcnn_model(self, config_path):
    model = model_zoo.get(config_path, trained=True)
    model.eval()

    fields = {
        "proposal_boxes": Boxes,
        "objectness_logits": Tensor,
        "pred_boxes": Boxes,
        "scores": Tensor,
        "pred_classes": Tensor,
        "pred_masks": Tensor,
    }
    script_model = scripting_with_instances(model, fields)

    # Test that batch inference with images of different shapes is supported
    image = get_sample_coco_image()
    small_image = nn.functional.interpolate(image, scale_factor=0.5)
    inputs = [{"image": image}, {"image": small_image}]
    with torch.no_grad():
        instance = model.inference(inputs, do_postprocess=False)[0]
        scripted_instance = script_model.inference(inputs, do_postprocess=False)[0]
    assert_instances_allclose(instance, scripted_instance)
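# Hedged usage sketch (not part of the original tests): a caller for the
# scripting_with_instances-based helper above. The config path and the name
# test_mask_rcnn_scripting_example are illustrative assumptions; any GeneralizedRCNN
# model-zoo config whose outputs carry the fields registered above should exercise
# the same code path.
def test_mask_rcnn_scripting_example(self):
    self._test_rcnn_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")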
def _test_model(self, config_path, inference_func, batch=1):
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()
    inputs = tuple(image.clone() for _ in range(batch))

    wrapper = TracingAdapter(model, inputs, inference_func)
    wrapper.eval()
    with torch.no_grad():
        # trace with smaller images, and the trace must still work
        trace_inputs = tuple(
            nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
            for _ in range(batch)
        )
        traced_model = torch.jit.trace(wrapper, trace_inputs)

    testing_devices = self._get_device_casting_test_cases(model)
    # save and load back the model in order to show traceback of TorchScript
    with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
        basename = "model"
        jitfile = f"{d}/{basename}.jit"
        torch.jit.save(traced_model, jitfile)
        traced_model = torch.jit.load(jitfile)

        if any(device and "cuda" in device for device in testing_devices):
            self._check_torchscript_no_hardcoded_device(jitfile, d, "cuda")

        for device in testing_devices:
            print(f"Testing casting to {device} for inference (traced on {model.device}) ...")
            with torch.no_grad():
                outputs = inference_func(copy.deepcopy(model).to(device), *inputs)
                traced_outputs = wrapper.outputs_schema(traced_model.to(device)(*inputs))
            if batch > 1:
                for output, traced_output in zip(outputs, traced_outputs):
                    assert_instances_allclose(output, traced_output, size_as_tensor=True)
            else:
                assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
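# Hedged sketch of the imports these snippets assume, based on detectron2's public
# API. Exact module paths vary across detectron2 versions (older snippets above use
# export_torchscript_with_instances and add_export_config, which were later replaced
# or removed), so treat this as an approximation rather than a definitive header.
import copy
import os
import random
import tempfile

import torch
from torch import Tensor, nn

from detectron2 import model_zoo
from detectron2.export import Caffe2Model, Caffe2Tracer, TracingAdapter, scripting_with_instances
from detectron2.export.flatten import flatten_to_tuple
from detectron2.export.torchscript_patch import patch_builtin_len
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.structures import Boxes
from detectron2.utils.testing import (
    assert_instances_allclose,
    convert_scripted_instances,
    get_sample_coco_image,
)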