def test_rgb_to_grayscale(self):
    img_tensor = torch.randint(0, 255, (3, 16, 16), dtype=torch.uint8)
    grayscale_tensor = F_t.rgb_to_grayscale(img_tensor).to(int)
    grayscale_pil_img = torch.tensor(
        np.array(F.to_grayscale(F.to_pil_image(img_tensor)))).to(int)
    max_diff = (grayscale_tensor - grayscale_pil_img).abs().max()
    self.assertLess(max_diff, 1.0001)
def test_rgb_to_grayscale(self):
    script_rgb_to_grayscale = torch.jit.script(F_t.rgb_to_grayscale)

    img_tensor = torch.randint(0, 255, (3, 16, 16), dtype=torch.uint8)
    img_tensor_clone = img_tensor.clone()
    grayscale_tensor = F_t.rgb_to_grayscale(img_tensor).to(int)
    grayscale_pil_img = torch.tensor(
        np.array(F.to_grayscale(F.to_pil_image(img_tensor)))).to(int)
    max_diff = (grayscale_tensor - grayscale_pil_img).abs().max()
    self.assertLess(max_diff, 1.0001)
    self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

    # scriptable function test
    grayscale_script = script_rgb_to_grayscale(img_tensor).to(int)
    self.assertTrue(torch.equal(grayscale_script, grayscale_tensor))
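# Minimal reference sketch of the conversion both paths in the tests above are
# expected to perform: an ITU-R BT.601 luma weighting of the R, G, B channels.
# The 1.0001 tolerance in the assertions allows for off-by-one rounding
# differences between the tensor and PIL paths; the exact weights and rounding
# used by each implementation are assumptions here, not taken from this snippet.
def rgb_to_grayscale_reference(img):
    # img: uint8 tensor of shape (3, H, W)
    r, g, b = img.float()
    return (0.299 * r + 0.587 * g + 0.114 * b).round().to(torch.uint8)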
def convert_image_color_space_tensor(
    image: torch.Tensor, old_color_space: ColorSpace, new_color_space: ColorSpace
) -> torch.Tensor:
    if new_color_space == old_color_space:
        return image.clone()

    if old_color_space == ColorSpace.GRAYSCALE:
        image = _grayscale_to_rgb_tensor(image)

    if new_color_space == ColorSpace.GRAYSCALE:
        image = _FT.rgb_to_grayscale(image)

    return image
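# Hedged usage sketch for convert_image_color_space_tensor. It assumes a
# ColorSpace enum exposing GRAYSCALE and RGB members and the private helpers
# referenced above; the (1, H, W) grayscale and (3, H, W) RGB channel layouts
# are assumptions based on the surrounding torchvision-style code.
gray = torch.rand(1, 16, 16)
rgb = convert_image_color_space_tensor(gray, ColorSpace.GRAYSCALE, ColorSpace.RGB)
assert rgb.shape[0] == 3  # grayscale replicated into three channels
back = convert_image_color_space_tensor(rgb, ColorSpace.RGB, ColorSpace.GRAYSCALE)
assert back.shape[0] == 1  # collapsed back to a single luma channel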
def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
    """Generate a batch of Poisson noise (PyTorch version).

    Args:
        img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
            Default: 1.0.
        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
            0 for False, 1 for True. Default: 0.

    Returns:
        (Tensor): Generated noise, shape (b, c, h, w), float32; add it to the
            input image to obtain the noisy result.
    """
    b, _, h, w = img.size()
    if isinstance(gray_noise, (float, int)):
        cal_gray_noise = gray_noise > 0
    else:
        gray_noise = gray_noise.view(b, 1, 1, 1)
        cal_gray_noise = torch.sum(gray_noise) > 0

    if cal_gray_noise:
        img_gray = rgb_to_grayscale(img, num_output_channels=1)
        # round and clip image for counting vals correctly
        img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
        # use for-loop to get the unique values for each sample
        vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
        vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
        vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
        out = torch.poisson(img_gray * vals) / vals
        noise_gray = out - img_gray
        noise_gray = noise_gray.expand(b, 3, h, w)

    # always calculate color noise
    # round and clip image for counting vals correctly
    img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
    # use for-loop to get the unique values for each sample
    vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
    vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
    vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
    out = torch.poisson(img * vals) / vals
    noise = out - img

    if cal_gray_noise:
        # blend grayscale and color noise per sample
        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
    if not isinstance(scale, (float, int)):
        scale = scale.view(b, 1, 1, 1)
    return noise * scale
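# Hedged usage sketch for generate_poisson_noise_pt: the function returns the
# noise itself, so the caller adds it to the image and clamps back to [0, 1].
# The random batch and the per-sample scale/gray_noise tensors below are
# illustrative assumptions, not values taken from the original code.
imgs = torch.rand(4, 3, 32, 32)              # (b, c, h, w) in [0, 1], float32
scale = torch.tensor([0.5, 1.0, 1.5, 2.0])   # per-sample noise scale, shape (b,)
gray_noise = torch.tensor([0., 1., 0., 1.])  # 1 -> grayscale noise for that sample
noise = generate_poisson_noise_pt(imgs, scale=scale, gray_noise=gray_noise)
noisy = torch.clamp(imgs + noise, 0.0, 1.0)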