def lucid_transforms(img, jitter=None, scale=.5, degrees=45, **kwargs):
    h, w = img.shape[-2], img.shape[-1]
    if jitter is None:
        jitter = min(h, w) // 2
    fastai_image = vision.Image(img.squeeze())

    # pad
    fastai_image._flow = gpu_affine_grid(fastai_image.shape)
    vision.transform.pad()(fastai_image, jitter)

    # jitter
    first_jitter = int(jitter * (2 / 3))
    vision.transform.crop_pad()(fastai_image,
                                (h + first_jitter, w + first_jitter),
                                row_pct=np.random.rand(),
                                col_pct=np.random.rand())

    # scale
    percent = scale * 100  # scale up to integer to avoid float repr errors
    scale_factors = [(100 - percent + percent / 5. * i) / 100 for i in range(11)]
    rand_scale = scale_factors[int(np.random.rand() * len(scale_factors))]
    fastai_image._flow = gpu_affine_grid(fastai_image.shape)
    vision.transform.zoom()(fastai_image, rand_scale)

    # rotate
    rotate_factors = list(range(-degrees, degrees + 1)) + degrees // 2 * [0]
    rand_rotate = rotate_factors[int(np.random.rand() * len(rotate_factors))]
    fastai_image._flow = gpu_affine_grid(fastai_image.shape)
    vision.transform.rotate()(fastai_image, rand_rotate)

    # jitter
    vision.transform.crop_pad()(fastai_image, (h, w),
                                row_pct=np.random.rand(),
                                col_pct=np.random.rand())

    return fastai_image.data[None, :]
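# Minimal usage sketch for lucid_transforms, assuming `torch`, `np` (numpy),
# fastai v1's `vision` module, and the accompanying `gpu_affine_grid` helper
# are in scope; the tensor shape and parameter values are illustrative only.
img = torch.rand(1, 3, 224, 224) * 2 - 1          # buffer roughly in [-1, 1]
augmented = lucid_transforms(img, jitter=16, scale=0.5, degrees=45)
print(augmented.shape)                            # expected: (1, 3, 224, 224)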
def visualize_feature(model, layer, feature, start_image=None, last_hook_out=None,
                      size=200, steps=500, lr=0.004, weight_decay=0.1, grad_clip=1,
                      debug=False, frames=10, show=True, **kwargs):
    h, w = size if isinstance(size, tuple) else (size, size)
    if start_image is not None:
        fastai_image = vision.Image(start_image.squeeze())
        fastai_image._flow = gpu_affine_grid((3, h, w))  # resize
        img_buf = fastai_image.data[None, :]
        img_buf = normalize(img_buf)
        img_buf = rgb_to_lucid_colorspace(img_buf)
        img_buf = rgb_to_fft(h, w, img_buf, **kwargs)
    else:
        img_buf = init_fft_buf(h, w, **kwargs)
    img_buf.requires_grad_()
    opt = torch.optim.AdamW([img_buf], lr=lr, weight_decay=weight_decay)

    hook_out = None
    def callback(m, i, o):
        nonlocal hook_out
        hook_out = o
    hook = layer.register_forward_hook(callback)

    for i in range(1, steps + 1):
        opt.zero_grad()
        img = fft_to_rgb(h, w, img_buf, **kwargs)
        img = lucid_colorspace_to_rgb(img)
        stats = tensor_stats(img)
        img = torch.sigmoid(img) * 2 - 1
        img = lucid_transforms(img, **kwargs)
        model(img.cuda())
        if feature is None:
            loss = -1 * hook_out[0].pow(2).mean()
        else:
            loss = -1 * hook_out[0][feature].mean()
        if last_hook_out is not None:
            similarity = cossim(hook_out[0], last_hook_out, **kwargs)
            loss = loss + loss * similarity
        loss.backward()
        torch.nn.utils.clip_grad_norm_(img_buf, grad_clip)
        opt.step()
        if debug and i % (steps // frames) == 0:
            clear_output(wait=True)
            label = f"step: {i} loss: {loss:.2f} stats:{stats}"
            show_rgb(image_buf_to_rgb(h, w, img_buf, **kwargs), label=label, **kwargs)

    hook.remove()
    retval = image_buf_to_rgb(h, w, img_buf, **kwargs)
    if show and not debug:
        show_rgb(retval, **kwargs)
    return retval, hook_out[0].clone().detach()
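# Hypothetical usage sketch: maximize the activation of one feature map of a
# conv layer. The torchvision VGG16 model and the layer index are assumptions;
# any CUDA model whose target layer accepts a forward hook should work here.
import torchvision
model = torchvision.models.vgg16(pretrained=True).cuda().eval()
layer = model.features[24]          # an arbitrary convolutional layer
rgb, acts = visualize_feature(model, layer, feature=12, size=224, steps=300)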
def open_nii_image(fn):
    """ Return a fastai `Image` loaded from a NIfTI or NRRD file `fn`. """
    x = None
    if str(fn).split('.')[-1] == 'nrrd':
        _nrrd = nrrd.read(str(fn))
        x = _nrrd[0]                  # nrrd.read returns (data, header)
    else:
        load_data = LoadNifti(image_only=True)
        x = load_data(fn)
    if x is None:
        raise TypeError(f"could not load image data from {fn}")
    return fvision.Image(torch.Tensor(x[None]))
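# Hypothetical usage (assumes pynrrd imported as `nrrd`, MONAI's LoadNifti, and
# fastai.vision imported as `fvision`); the path below is a placeholder.
scan = open_nii_image('data/case_001.nii.gz')
print(scan.shape)                   # (1, ...): x[None] prepends a channel axis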
def Forward(inputImgName: Path, Color):
    outputImgName = inputImgName.with_name(inputImgName.stem + "-seg.png")
    Img = fv.open_image(inputImgName)
    originalSize = Img.size            # fastai reports (h, w)
    Img = Img.resize(500)
    Res = Learn.predict(Img)[0]

    # Colorization: build an RGBA overlay from the predicted mask
    Mask = (Res.data == 255)
    R, G, B, A = [torch.zeros((1, 500, 500), dtype=torch.uint8) for _ in range(4)]
    R[Mask], G[Mask], B[Mask] = Color
    A[Mask] = 255
    ColorMask = fv.Image(torch.cat([R, G, B, A]))

    Pil_Img = to_pil(ColorMask.data.detach().cpu().type(torch.ByteTensor))
    Pil_Img = Pil_Img.resize(originalSize[::-1])   # PIL expects (w, h)
    Pil_Img.save(outputImgName)
    return outputImgName
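# Hypothetical usage (assumes a trained fastai v1 learner bound to the global
# `Learn`, a tensor-to-PIL converter bound to `to_pil`, e.g.
# torchvision.transforms.ToPILImage(), and fastai.vision imported as `fv`):
from pathlib import Path
seg_path = Forward(Path('images/slide_01.png'), Color=(255, 0, 0))  # red overlay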
def open_tiff(fn: faiv.PathOrStr) -> faiv.Image:
    """ Open a 1-channel TIFF image and transform it into a fastai `Image`. """
    return faiv.Image(
        torch.Tensor(np.asarray(Image.open(fn), dtype=np.float32)[None, ...]))
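# Hypothetical usage (assumes PIL's `Image`, numpy as `np`, and fastai.vision
# imported as `faiv`); the path is a placeholder.
tile = open_tiff('data/band_01.tif')
print(tile.shape)                   # (1, H, W): a single float32 channel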
def open_nii(fn: str) -> faiv.Image:
    """ Return fastai `Image` object created from NIfTI image in file `fn`. """
    x = nib.load(str(fn)).get_fdata()   # get_data() was removed in nibabel 5.0
    return faiv.Image(torch.Tensor(x))
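# Hypothetical usage (assumes nibabel imported as `nib` and fastai.vision as
# `faiv`); note that, unlike open_nii_image above, no channel axis is added,
# so the tensor keeps the volume's native shape.
vol = open_nii('data/t1_weighted.nii.gz')
print(vol.shape)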
def reconstruct(self, t: torch.Tensor):
    return ImageTuple(faiv.Image(t[0]), faiv.Image(t[1]))
def reconstruct(self, t: torch.Tensor):
    if len(t.size()) == 0:
        return t
    return ImageTuple(faiv.Image(t[0]), faiv.Image(t[1]))
def to_one(self):
    return faiv.Image(torch.cat(self.data, 2))
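# to_one stitches the tuple's tensors along dim 2 (width) into one side-by-side
# image. Minimal sketch of the same torch.cat call, with two (C, H, W) tensors
# standing in for the tuple's `.data` pair:
left, right = torch.rand(3, 64, 64), torch.rand(3, 64, 64)
side_by_side = faiv.Image(torch.cat([left, right], 2))   # shape (3, 64, 128)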
def open_tiff(fn: str) -> faiv.Image:
    """ Return fastai `Image` object created from TIFF image in file `fn`. """
    return faiv.Image(
        torch.Tensor(np.asarray(Image.open(fn), dtype=np.float32)[None, ...]))