Example #1
    def __getitem__(self, i):
        g = imread(random.choice(self.d)) / 255
        g = rgb2gray(g).astype(np.float32)
        g = rand_crop(g, self.gs + self.ks - 1)
        g = torch.from_numpy(g).view(1, *g.shape)

        k = gen_kernel(self.ks).astype(np.float32)
        k = torch.from_numpy(k)
        s = random.uniform(1, 3)
        s = torch.tensor((s, ), dtype=torch.float)

        # blur with valid convolution: output side = (gs + ks - 1) - (ks - 1) = gs
        y = F.conv2d(g.view(1, *g.shape), k.view(1, 1, *k.shape)).squeeze(0)
        assert y.shape[-1] == self.gs
        # additive Gaussian noise with standard deviation s / 255 (when enabled)
        if self.noise:
            y += torch.randn_like(y) * s / 255
        # show(y[0])
        # edge tapering; TODO: convert to torch and move to the model stage
        if self.edgetaper:
            y = y.permute(1, 2, 0)
            y = to_tensor(
                edgetaper(pad_for_kernel(y.numpy(), k.numpy(), "edge"),
                          k.numpy())).astype(np.float32)
            y = torch.from_numpy(y).squeeze(-1)
        g = center_crop(g, self.gs)
        # shapes: g [1,284,284], y [1,320,320], k [37,37], s [1]
        return g, y, k, s
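
A minimal sketch of the Dataset scaffolding this __getitem__ seems to expect; the class name, the glob pattern, and the default sizes are assumptions for illustration, and rand_crop, gen_kernel, center_crop, edgetaper, pad_for_kernel and to_tensor are helpers from the original repo that are not reproduced here.

import glob
from torch.utils.data import Dataset, DataLoader

class SyntheticBlurDataset(Dataset):   # hypothetical wrapper; name and defaults are assumptions
    def __init__(self, root, gs=284, ks=37, noise=True, edgetaper=True):
        self.d = glob.glob(f"{root}/*.png")    # sharp source images to crop from
        self.gs, self.ks = gs, ks              # ground-truth crop size and kernel size
        self.noise, self.edgetaper = noise, edgetaper

    def __len__(self):
        return len(self.d)

    # __getitem__ as in Example #1 above

loader = DataLoader(SyntheticBlurDataset("data/train"), batch_size=4, shuffle=True)
g, y, k, s = next(iter(loader))
# default collation stacks the per-item shapes noted above into
# [4,1,284,284], [4,1,320,320], [4,37,37], [4,1]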
Example #2
 def __getitem__(self, i):
     print(self.d[i])
     mat = loadmat(self.d[i])
     g = mat["x"].astype(np.float32)
     y = mat["y"].astype(np.float32)
     k = mat["f"].astype(np.float32)
     # flip the kernel in both axes
     k = k[::-1, ::-1]
     # clip to [0, 1] and normalize so the kernel sums to 1
     k = np.clip(k, 0, 1)
     k /= np.sum(k)
     y = to_tensor(edgetaper(pad_for_kernel(y, k, "edge"),
                             k)).astype(np.float32)
     g = torch.from_numpy(g).unsqueeze(0)
     y = torch.from_numpy(y).squeeze(-1)
     k = torch.from_numpy(k)
     s = torch.tensor((1.5, ), dtype=torch.float)
     return g, y, k, s
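
This snippet reads the sharp image, the blurred observation, and the kernel from a .mat file under the keys "x", "y" and "f". As a hedged illustration of that layout, a dummy file could be written as below; the sizes, the valid-mode blur, and the file name are assumptions, not something the snippet guarantees.

import numpy as np
from scipy.io import savemat
from scipy.signal import convolve2d

x = np.random.rand(288, 288).astype(np.float32)        # sharp image, stored under "x"
f = np.full((17, 17), 1 / 17 ** 2, dtype=np.float32)   # blur kernel, stored under "f"
y = convolve2d(x, f, mode="valid").astype(np.float32)  # blurred observation, stored under "y"
savemat("sample_000.mat", {"x": x, "y": y, "f": f})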
Example #3
 def __getitem__(self, i):
     p = self.d[i]
     print(p)
     # file names start with "<image index>_<kernel index>_", which points at the ground-truth files
     g, k = p.name.split("_")[:2]
     g = imread(p.parent / f"img{g}_groundtruth_img.png")
     k = imread(p.parent / f"kernel{k}_groundtruth_kernel.png")
     y = imread(p)
     g, k, y = [a.astype(np.float32) / 255 for a in (g, k, y)]
     # k = k[::-1, ::-1]
     k = np.clip(k, 0, 1)
     k /= np.sum(k)
     y = to_tensor(edgetaper(pad_for_kernel(y, k, "edge"),
                             k)).astype(np.float32)
     g = torch.from_numpy(g).unsqueeze(0)
     y = torch.from_numpy(y).squeeze(-1)
     k = torch.from_numpy(k)
     s = torch.tensor((2.55, ), dtype=torch.float)
     return g, y, k, s
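
The parsing above relies on the blurred observation being named "<image>_<kernel>_..." and stored next to the img*/kernel* ground-truth files. A small sketch of that mapping (the concrete path below is made up for illustration):

from pathlib import Path

p = Path("levin/1_3_blurred.png")                           # hypothetical blurred observation
g_idx, k_idx = p.name.split("_")[:2]                        # -> "1", "3"
print(p.parent / f"img{g_idx}_groundtruth_img.png")         # levin/img1_groundtruth_img.png
print(p.parent / f"kernel{k_idx}_groundtruth_kernel.png")   # levin/kernel3_groundtruth_kernel.png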
Example #4
 def __getitem__(self, i):
     # index layout: [img1 x kernel1, img1 x kernel2, ..., img1 x kernel8, img2 x kernel1, ...]
     g = imread(self.d[i // 8]) / 255
     g = rgb2gray(g).astype(np.float32)
     k = self.k[i % 8]
     g = torch.from_numpy(g)
     s = torch.tensor((self.s, ), dtype=torch.float)
     g = g.view(1, *g.shape)
     # blur with the kernel (valid convolution, no padding)
     y = F.conv2d(g.view(1, *g.shape), k.view(1, 1, *k.shape))
     g = center_crop(g, 250)
     y = center_crop(y, 250)[0]
     # show(torch.cat((g.detach().cpu()[0], y.detach().cpu()[0,0]), 0))
     # noise
     # y += torch.randn_like(y) * s / 255
     # edge tapering; TODO: convert to torch and move to the model stage
     y = y.permute(1, 2, 0)
     y = to_tensor(
         edgetaper(pad_for_kernel(y.numpy(), k.numpy(), "edge"),
                   k.numpy())).astype(np.float32)
     y = torch.from_numpy(y).squeeze(-1)
     # shapes: g [1,250,250], y [1,267,267], k [1,13,13], s [1]
     return g, y, k, s
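
Because i // 8 selects the image and i % 8 selects the kernel, a companion __len__ would plausibly report len(self.d) * 8. A sketch of that indexing; the class skeleton is an assumption for illustration, not part of the original snippet.

class PairedBlurDataset:                   # hypothetical skeleton around the __getitem__ above
    def __init__(self, images, kernels, s):
        self.d, self.k, self.s = images, kernels, s   # image paths, the 8 kernels, noise level

    def __len__(self):
        return len(self.d) * 8             # 8 kernels per source image

    # __getitem__ as above: i // 8 selects the image, i % 8 selects the kernel

for i in (0, 1, 7, 8, 9, 15, 16):
    print(i, "->", (i // 8, i % 8))
# 0 -> (0, 0)  1 -> (0, 1)  7 -> (0, 7)  8 -> (1, 0)  9 -> (1, 1)  15 -> (1, 7)  16 -> (2, 0)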