# Module-level imports used by the test variants below.
import copy
import random
import tempfile

import torch
from torch import nn

from detectron2 import model_zoo
from detectron2.export import TracingAdapter
from detectron2.utils.testing import assert_instances_allclose, get_sample_coco_image


# Batched variant: trace once on downscaled images, then verify the trace
# still matches eager outputs on the full-size inputs.
def _test_model(self, config_path, inference_func, batch=1):
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()
    inputs = tuple(image.clone() for _ in range(batch))
    wrapper = TracingAdapter(model, inputs, inference_func)
    wrapper.eval()
    with torch.no_grad():
        # trace with smaller images, and the trace must still work
        trace_inputs = tuple(
            nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
            for _ in range(batch)
        )
        traced_model = torch.jit.trace(wrapper, trace_inputs)
        outputs = inference_func(model, *inputs)
        traced_outputs = wrapper.outputs_schema(traced_model(*inputs))
    if batch > 1:
        for output, traced_output in zip(outputs, traced_outputs):
            assert_instances_allclose(output, traced_output, size_as_tensor=True)
    else:
        assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
# Single-image variant: trace at half resolution, then run the traced model
# on the original full-size image.
def _test_model(self, config_path, inference_func):
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()
    wrapper = TracingAdapter(model, image, inference_func)
    wrapper.eval()
    with torch.no_grad():
        small_image = nn.functional.interpolate(image, scale_factor=0.5)
        # trace with a different image, and the trace must still work
        traced_model = torch.jit.trace(wrapper, (small_image,))
        output = inference_func(model, image)
        traced_output = wrapper.outputs_schema(traced_model(image))
    assert_instances_allclose(output, traced_output, size_as_tensor=True)
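# Usage sketch (not part of the original excerpt): test methods in this suite
# pair _test_model with an inference_func closure. The config path below and
# the model.inference(inputs, do_postprocess=False) call are assumptions based
# on the public detectron2 API, not taken from this file.
def testMaskRCNNFPN(self):
    def inference_func(model, image):
        # GeneralizedRCNN.inference accepts a list of {"image": tensor} dicts;
        # skipping postprocessing keeps the raw Instances comparable.
        inputs = [{"image": image}]
        return model.inference(inputs, do_postprocess=False)[0]

    self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func)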
# Batched variant with a save/load round-trip and device-casting checks.
def _test_model(self, config_path, inference_func, batch=1):
    model = model_zoo.get(config_path, trained=True)
    image = get_sample_coco_image()
    inputs = tuple(image.clone() for _ in range(batch))
    wrapper = TracingAdapter(model, inputs, inference_func)
    wrapper.eval()
    with torch.no_grad():
        # trace with smaller images, and the trace must still work
        trace_inputs = tuple(
            nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
            for _ in range(batch)
        )
        traced_model = torch.jit.trace(wrapper, trace_inputs)

    testing_devices = self._get_device_casting_test_cases(model)
    # save and load back the model in order to show traceback of TorchScript
    with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
        basename = "model"
        jitfile = f"{d}/{basename}.jit"
        torch.jit.save(traced_model, jitfile)
        traced_model = torch.jit.load(jitfile)

        if any(device and "cuda" in device for device in testing_devices):
            self._check_torchscript_no_hardcoded_device(jitfile, d, "cuda")

    for device in testing_devices:
        print(f"Testing casting to {device} for inference (traced on {model.device}) ...")
        with torch.no_grad():
            outputs = inference_func(copy.deepcopy(model).to(device), *inputs)
            traced_outputs = wrapper.outputs_schema(traced_model.to(device)(*inputs))
        if batch > 1:
            for output, traced_output in zip(outputs, traced_outputs):
                assert_instances_allclose(output, traced_output, size_as_tensor=True)
        else:
            assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
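# The two helpers used above are not shown in this excerpt. A minimal sketch,
# assuming testing_devices entries are device strings (or None meaning "leave
# the model on its tracing device") and that a TorchScript archive is a zip
# whose .py entries hold the serialized sources; these are hypothetical
# reconstructions, not the repository's actual implementations.
import zipfile

def _get_device_casting_test_cases(self, model):
    # None = run on the tracing device; also try CPU, and CUDA when available.
    devices = [None, "cpu"]
    if model.device.type == "cuda" or torch.cuda.is_available():
        devices.append("cuda")
    return devices

def _check_torchscript_no_hardcoded_device(self, jitfile, extract_dir, device):
    # A trace that bakes in a device literal (e.g. "cuda:0") cannot be cast
    # elsewhere with .to(); scan the archive's serialized code for it.
    with zipfile.ZipFile(jitfile) as z:
        z.extractall(extract_dir)  # leave sources unpacked for debugging
        for name in z.namelist():
            if not name.endswith(".py"):
                continue
            code = z.read(name).decode("utf-8")
            assert device not in code, f"device {device!r} hardcoded in {name}"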