def _sliding_window_processor(engine, batch):
    net.eval()
    img, seg, meta_data = batch
    with torch.no_grad():
        seg_probs = sliding_window_inference(img, roi_size, sw_batch_size, net, device)
        return seg_probs, seg.to(device)
def _sliding_window_processor(_engine, batch):
    net.eval()
    img, seg, meta_data = batch
    with torch.no_grad():
        # net(x) returns multiple outputs; only the first is used for sliding-window prediction
        seg_probs = sliding_window_inference(img, roi_size, sw_batch_size, lambda x: net(x)[0], device)
        return predict_segmentation(seg_probs)
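For reference, the same two utilities can also be exercised outside an engine loop. The following is a minimal sketch (not from the source), assuming a single preprocessed volume `val_image`, a trained `net`, and the `roi_size`/`sw_batch_size` settings used above; the import paths follow current MONAI, whereas the older-style calls above pass `device` positionally.

import torch
from monai.inferers import sliding_window_inference
from monai.networks.utils import predict_segmentation

# hedged sketch: val_image is assumed to be a (batch, channel, spatial...) tensor
net.eval()
with torch.no_grad():
    seg_probs = sliding_window_inference(val_image, roi_size, sw_batch_size, net)
    # convert the raw network output to a discrete segmentation (threshold or argmax)
    seg_labels = predict_segmentation(seg_probs)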
def test_sliding_window_default(self, image_shape, roi_shape, sw_batch_size):
    # sliding_window_inference operates on torch tensors, not numpy arrays
    inputs = torch.ones(image_shape)
    device = torch.device("cpu:0")

    def compute(data):
        return data.to(device) + 1

    result = sliding_window_inference(inputs, roi_shape, sw_batch_size, compute, device)
    expected_val = np.ones(image_shape, dtype=np.float32) + 1
    self.assertTrue(np.allclose(result.numpy(), expected_val))
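The test method above receives its shapes as arguments and references `self`, which suggests a parameterized `unittest.TestCase`. Below is a hedged, self-contained sketch of what the surrounding scaffolding could look like; the case values are illustrative only, the imports follow current MONAI, and the call drops the positional `device` argument used in the older-style snippet above.

import unittest

import numpy as np
import torch
from parameterized import parameterized
from monai.inferers import sliding_window_inference

# illustrative case only: (image_shape, roi_shape, sw_batch_size)
TEST_CASE_0 = [(1, 3, 16, 15, 7), (4, 10, 7), 3]


class TestSlidingWindowInference(unittest.TestCase):
    @parameterized.expand([TEST_CASE_0])
    def test_sliding_window_default(self, image_shape, roi_shape, sw_batch_size):
        inputs = torch.ones(image_shape)

        def compute(data):
            # constant predictor: every voxel of the output should equal input + 1
            return data + 1

        result = sliding_window_inference(inputs, roi_shape, sw_batch_size, compute)
        expected_val = np.ones(image_shape, dtype=np.float32) + 1
        self.assertTrue(np.allclose(result.numpy(), expected_val))


if __name__ == "__main__":
    unittest.main()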
def _sliding_window_processor(engine, batch):
    net.eval()
    with torch.no_grad():
        seg_probs = sliding_window_inference(batch['img'], roi_size, sw_batch_size, net, device)
        return seg_probs, batch['seg'].to(device)
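The processor variants above are written as Ignite-style process functions: they take `(engine, batch)` and return the values that downstream handlers consume. A hedged sketch of how one of them might be driven, assuming a DataLoader `val_loader` that yields the dictionary batches used in the last variant:

from ignite.engine import Engine

# hedged sketch: build an evaluation engine around the processor and run one pass
# over the validation data; each iteration's output is (seg_probs, seg) as returned above
evaluator = Engine(_sliding_window_processor)
state = evaluator.run(val_loader)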