def _internal_maybe_mirror_and_pred_2D(self, x, num_repeats, mirror_axes, do_mirroring=True, mult=None):
    """Run inference on a 2D batch, averaging over repeats and mirror flips.

    Performs ``num_repeats`` forward passes and, when ``do_mirroring`` is
    True, additionally averages predictions over flipped versions of the
    input along the spatial dims selected by ``mirror_axes`` (axis 0 maps
    to tensor dim 2, axis 1 to tensor dim 3). Each mirrored prediction is
    flipped back before accumulation so all contributions are aligned.

    :param x: numpy array; assumed (b, c, x, y) -- TODO confirm with caller
    :param num_repeats: number of repeated forward passes to average
    :param mirror_axes: iterable subset of (0, 1); spatial axes to mirror
    :param do_mirroring: enable test-time mirror augmentation
    :param mult: optional multiplicative map applied to the accumulated
        result (e.g. a Gaussian importance weighting); broadcast over (b, c)
    :return: numpy array holding the averaged post-nonlinearity prediction
    """
    with torch.no_grad():
        x_torch = torch.from_numpy(x).float()
        if self.get_device() == "cpu":
            x_torch = x_torch.cpu()
        else:
            x_torch = x_torch.cuda(self.get_device())

        # Accumulator sized to the actual batch (generalized from the
        # previous hard-coded batch size of 1, which only worked for
        # single-sample input).
        result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:])).float()
        if self.get_device() == "cpu":
            result_torch = result_torch.cpu()
        else:
            result_torch = result_torch.cuda(self.get_device())

        # Total number of predictions being averaged: repeats x mirror
        # configurations. Each contribution is pre-scaled by 1/num_results
        # so the accumulator ends up as the mean.
        num_results = num_repeats
        if do_mirroring:
            mirror_idx = 4
            num_results *= 2 ** len(mirror_axes)
        else:
            mirror_idx = 1

        for i in range(num_repeats):
            for m in range(mirror_idx):
                if m == 0:
                    pred = self.inference_apply_nonlin(self(x_torch))
                    result_torch += 1 / num_results * pred
                if m == 1 and (1 in mirror_axes):
                    pred = self.inference_apply_nonlin(self(flip(x_torch, 3)))
                    result_torch += 1 / num_results * flip(pred, 3)
                if m == 2 and (0 in mirror_axes):
                    pred = self.inference_apply_nonlin(self(flip(x_torch, 2)))
                    result_torch += 1 / num_results * flip(pred, 2)
                if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):
                    pred = self.inference_apply_nonlin(self(flip(flip(x_torch, 3), 2)))
                    result_torch += 1 / num_results * flip(flip(pred, 3), 2)

        # Optional post-hoc weighting (e.g. patch-border down-weighting).
        if mult is not None:
            result_torch[:, :] *= mult

    return result_torch.detach().cpu().numpy()
def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.Tensor],
                                       mirror_axes: tuple,
                                       do_mirroring: bool = True,
                                       mult: Union[np.ndarray, torch.Tensor] = None) -> torch.Tensor:
    """Predict a 2D batch on the GPU, optionally averaging over mirror flips.

    Everything in here takes place on the GPU. If ``x`` and ``mult`` are not
    yet on the GPU this is taken care of here. Returns a cuda tensor, NOT a
    numpy array.

    Fixed annotations: the previous hint ``np.ndarray or torch.tensor``
    evaluated the ``or`` at runtime (collapsing to just ``np.ndarray``) and
    named the factory function ``torch.tensor`` instead of the type
    ``torch.Tensor``. ``mult`` defaults to None (implicit optional).

    :param x: input of shape (b, c, x, y)
    :param mirror_axes: iterable subset of (0, 1); axis 0 maps to tensor
        dim 2, axis 1 to tensor dim 3
    :param do_mirroring: enable test-time mirror augmentation
    :param mult: optional multiplicative map applied to the result
    :return: cuda tensor with the averaged post-nonlinearity prediction
    """
    assert len(x.shape) == 4, 'x must be (b, c, x, y)'
    with torch.no_grad():
        x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
        result_torch = torch.zeros(
            [x.shape[0], self.num_classes] + list(x.shape[2:]),
            dtype=torch.float).cuda(self.get_device(), non_blocking=True)

        if mult is not None:
            mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())

        # num_results is the count of predictions averaged; each one is
        # pre-scaled by 1/num_results so the accumulator is the mean.
        if do_mirroring:
            mirror_idx = 4
            num_results = 2 ** len(mirror_axes)
        else:
            mirror_idx = 1
            num_results = 1

        for m in range(mirror_idx):
            if m == 0:
                pred = self.inference_apply_nonlin(self(x))
                result_torch += 1 / num_results * pred
            if m == 1 and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(flip(x, 3)))
                result_torch += 1 / num_results * flip(pred, 3)
            if m == 2 and (0 in mirror_axes):
                pred = self.inference_apply_nonlin(self(flip(x, 2)))
                result_torch += 1 / num_results * flip(pred, 2)
            if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(
                    self(flip(flip(x, 3), 2)))
                result_torch += 1 / num_results * flip(flip(pred, 3), 2)

        # Optional post-hoc weighting (e.g. Gaussian importance map).
        if mult is not None:
            result_torch[:, :] *= mult

    return result_torch
def _internal_maybe_mirror_and_pred_2D(self, x, num_repeats, mirror_axes, do_mirroring=True, mult=None):
    """Predict a 2D batch on the GPU, averaging over repeats and mirror flips.

    Everything in here takes place on the GPU. If ``x`` and ``mult`` are not
    yet on the GPU this is taken care of here. Returns a cuda tensor, NOT a
    numpy array.

    :param x: input, assumed (b, c, x, y) -- TODO confirm with caller
    :param num_repeats: number of repeated forward passes to average
    :param mirror_axes: iterable subset of (0, 1); axis 0 maps to tensor
        dim 2, axis 1 to tensor dim 3
    :param do_mirroring: enable test-time mirror augmentation
    :param mult: optional multiplicative map applied to the result
    :return: cuda tensor with the averaged post-nonlinearity prediction
    """
    with torch.no_grad():
        x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
        # Only convert/move mult when it was actually provided; converting
        # the None default through maybe_to_torch/to_cuda is not safe
        # (matches the guard used by the typed variant of this method).
        if mult is not None:
            mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())

        result_torch = torch.zeros(
            [x.shape[0], self.num_classes] + list(x.shape[2:]),
            dtype=torch.float).cuda(self.get_device(), non_blocking=True)

        # Total number of predictions being averaged: repeats x mirror
        # configurations; each contribution is pre-scaled by 1/num_results.
        num_results = num_repeats
        if do_mirroring:
            mirror_idx = 4
            num_results *= 2 ** len(mirror_axes)
        else:
            mirror_idx = 1

        for i in range(num_repeats):
            for m in range(mirror_idx):
                if m == 0:
                    pred = self.inference_apply_nonlin(self(x))
                    result_torch += 1 / num_results * pred
                if m == 1 and (1 in mirror_axes):
                    pred = self.inference_apply_nonlin(self(flip(x, 3)))
                    result_torch += 1 / num_results * flip(pred, 3)
                if m == 2 and (0 in mirror_axes):
                    pred = self.inference_apply_nonlin(self(flip(x, 2)))
                    result_torch += 1 / num_results * flip(pred, 2)
                if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):
                    pred = self.inference_apply_nonlin(
                        self(flip(flip(x, 3), 2)))
                    result_torch += 1 / num_results * flip(flip(pred, 3), 2)

        # Optional post-hoc weighting (e.g. Gaussian importance map).
        if mult is not None:
            result_torch[:, :] *= mult

    return result_torch