Example 1
    def inpaint_online(self, current_image: torch.Tensor,
                       current_mask: torch.Tensor) -> torch.Tensor:
        # After inversion the mask is 1 at known pixels and 0 inside the holes.
        current_mask = invert_mask(current_mask)
        current_image = normalize(current_image)
        # Zero out the hole region and let the model fill it in.
        current_image_masked = mask_tensor(current_image, current_mask)
        current_image_filled = self.model(current_image_masked, current_mask)
        # Keep the original pixels where they are known, take the model output
        # only inside the holes, then undo the normalization.
        current_image_result = denormalize(current_mask * current_image +
                                           invert_mask(current_mask) *
                                           current_image_filled)
        return current_image_result
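The helpers used here come from the project's utilities and are not shown on this page; normalize and denormalize apply and undo the dataset normalization. Under this method's convention, the two mask helpers behave roughly as follows (a sketch of the assumed semantics, not the project's actual implementation):

import torch

def invert_mask(mask: torch.Tensor) -> torch.Tensor:
    # Flip a binary {0, 1} mask: holes become known regions and vice versa.
    return 1 - mask

def mask_tensor(tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Zero out the values of `tensor` wherever `mask` is 0.
    return tensor * mask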
Example 2
    def inpaint_online(self, current_image: torch.Tensor,
                       current_mask: torch.Tensor) -> torch.Tensor:
        current_image = normalize(current_image)
        # In this variant the mask marks the hole with 1, so it is inverted
        # before zeroing out the missing region.
        current_image_masked = mask_tensor(current_image,
                                           invert_mask(current_mask))

        if not self.previous_available:
            # First frame: fill the temporal buffer with 4 * t_stride + 1
            # copies of the current frame and mask so a full window exists.
            self.masks = (4 * self.t_stride + 1) * [current_mask]
            self.masked_images = (4 * self.t_stride +
                                  1) * [current_image_masked]
            self.ones = torch.ones(current_mask.size()).cuda()
            self.prev_mask = current_mask
            self.prev_output = current_image_masked
            self.previous_available = True
        elif self.t:
            # Later frames: slide the window forward, dropping the oldest
            # entries and padding the future positions with the current frame.
            self.masks.pop(0)
            self.masks = self.masks[2 * self.t_stride:] + (2 * self.t_stride +
                                                           1) * [current_mask]
            self.masked_images.pop(0)
            self.masked_images = self.masked_images[2 * self.t_stride:] + (
                2 * self.t_stride + 1) * [current_image_masked]

        # Sample every t_stride-th entry of the buffer and stack it along a
        # new temporal dimension for the model.
        masks = torch.stack(self.masks[::self.t_stride], dim=2)
        masked_inputs = torch.stack(self.masked_images[::self.t_stride], dim=2)
        # Feed the previous output and mask back as memory for the recurrent model.
        prev_feed = torch.cat(
            [self.prev_output, self.ones, self.ones * self.prev_mask], dim=1)

        result, _, self.lstm_state, _, _ = self.model(masked_inputs, masks,
                                                      self.lstm_state,
                                                      prev_feed, self.t)
        result = result.squeeze(2)
        # Detach the recurrent state so the computation graph does not grow
        # across frames.
        self.lstm_state = repackage_hidden(self.lstm_state)
        self.t += 1

        self.prev_mask = current_mask * 0.5
        self.prev_output = result

        debug(current_image, '0_current_image', denormalize)
        debug(current_mask, '0_current_mask')
        debug(result, '1_result', denormalize)

        return denormalize(result)
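This recurrent variant relies on per-instance state that is set up elsewhere in the class. A minimal sketch of the constructor it assumes (the attribute names are taken from the method above; the class name and the default stride are assumptions):

class OnlineVideoInpaintingAlgorithm:
    def __init__(self, model, t_stride: int = 3):
        self.model = model            # recurrent video-inpainting network
        self.t_stride = t_stride      # temporal stride of the frame window
        self.t = 0                    # index of the frame being processed
        self.previous_available = False
        self.lstm_state = None        # recurrent memory carried between frames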
Example 3
import argparse

import torch
from PIL import Image
from apex import amp
from torch.utils.data import DataLoader
from torchvision import transforms

from inpainting.external.baseline import BaselineModel
from inpainting.utils import mean_and_std, mask_tensor

parser = argparse.ArgumentParser()
parser.add_argument('--opt-level', type=str, default='O1')
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--size', type=int, default=(256, 256))
opt = parser.parse_args()

image_transforms = transforms.Compose([
    transforms.Resize(opt.size, interpolation=Image.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(*mean_and_std())
])
mask_transforms = transforms.Compose([
    transforms.Resize(opt.size, interpolation=Image.NEAREST),
    transforms.ToTensor()
])
# ImageDataset, FileMaskDataset and InpaintingImageDataset are this project's
# dataset wrappers; their imports are not shown in this snippet.
image_dataset = ImageDataset(['data/raw/image/SmallPlaces2/data_large'],
                             transform=image_transforms)
mask_dataset = FileMaskDataset('data/raw/mask/demo', transform=mask_transforms)
dataset = InpaintingImageDataset(image_dataset, mask_dataset)
data_loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)

# Wrap the model with Apex AMP for mixed-precision inference.
model = amp.initialize(BaselineModel().cuda().eval(), opt_level=opt.opt_level)
with torch.no_grad():
    image, mask = next(iter(data_loader))
    image, mask = image.cuda(), mask.cuda()
    image_masked = mask_tensor(image, mask)
    # The network is fed the masked image and the mask, matching how the model
    # is called in the inpaint_online example above.
    image_filled = model(image_masked, mask)
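The script stops after a single forward pass; to inspect the result visually, the tensors can be mapped back to image range and written to disk. A sketch using torchvision's save_image, where denormalize is assumed to be the project helper that inverts transforms.Normalize(*mean_and_std()):

from torchvision.utils import save_image

from inpainting.utils import denormalize  # assumed location of the helper

# Map the normalized tensors back to image range and write them out as grids.
save_image(denormalize(image), 'image.png')
save_image(denormalize(image_masked), 'image_masked.png')
save_image(denormalize(image_filled), 'image_filled.png')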
Example 4
    def inpaint_online(self, current_image: torch.Tensor,
                       current_mask: torch.Tensor) -> torch.Tensor:
        # After inversion the mask is 1 at known pixels and 0 inside the holes.
        current_mask = invert_mask(current_mask)

        if self.previous_available:
            # Estimate optical flow in both directions between the previous and
            # the current frame, drop the flow inside the holes and fill it in.
            forward_flow = self.flow_model(self.previous_image, current_image)
            forward_flow_masked = mask_tensor(forward_flow, self.previous_mask)
            forward_flow_filled = fill_flow(forward_flow_masked,
                                            self.previous_mask)

            backward_flow = self.flow_model(current_image, self.previous_image)
            backward_flow_masked = mask_tensor(backward_flow, current_mask)
            backward_flow_filled = fill_flow(backward_flow_masked,
                                             current_mask)

            # Forward-backward consistency check: warp a pixel grid with the
            # backward flow and then back with the forward flow; pixels whose
            # round-trip error stays below eps are treated as reliably connected.
            grid = make_grid(forward_flow.size(), normalized=False)
            backward_grid = warp_tensor(grid, backward_flow, mode='nearest')
            forward_grid = warp_tensor(backward_grid,
                                       forward_flow,
                                       mode='nearest')
            flow_propagation_error = forward_grid - grid
            connected_pixels_mask = (torch.norm(flow_propagation_error, 2, dim=1)
                                     < self.eps).float().unsqueeze(1)

            # Propagate the previous frame's result into the current frame along
            # the filled backward flow, keeping only the consistent pixels.
            current_mask_warped = warp_tensor(connected_pixels_mask *
                                              self.previous_mask_result,
                                              backward_flow_filled,
                                              mode='nearest')
            current_image_warped = warp_tensor(self.previous_image_result,
                                               backward_flow_filled,
                                               mode='nearest')

            # Known pixels of the current frame take priority; inside the holes
            # the warped previous result is used wherever it is valid.
            current_mask_result = (current_mask +
                                   current_mask_warped * invert_mask(current_mask))
            current_image_result = (current_image * current_mask +
                                    current_image_warped * current_mask_warped *
                                    invert_mask(current_mask))

            debug(self.previous_image, '0_previous_image')
            debug(self.previous_mask, '0_previous_mask')
            debug(self.previous_image_result, '0_previous_image_result')
            debug(self.previous_mask_result, '0_previous_mask_result')
            debug(current_image, '0_current_image')
            debug(current_mask, '0_current_mask')
            debug(forward_flow, '1_forward_flow')
            debug(forward_flow_masked, '1_forward_flow_masked')
            debug(forward_flow_filled, '1_forward_flow_filled')
            debug(backward_flow, '1_backward_flow')
            debug(backward_flow_masked, '1_backward_flow_masked')
            debug(backward_flow_filled, '1_backward_flow_filled')
            debug(flow_propagation_error, '2_flow_propagation_error')
            debug(connected_pixels_mask, '2_connected_pixels_mask')
            debug(current_image_warped, '3_current_image_warped')
            debug(current_mask_warped, '3_current_mask_warped')
            debug(current_image_result, '4_current_image_result')
            debug(current_mask_result, '4_current_mask_result')
        else:
            # First frame: there is nothing to propagate yet, so only mask the
            # image and let the image-inpainting stage handle it.
            current_mask_result = current_mask
            current_image_result = mask_tensor(current_image, current_mask)

        # Whatever is still missing after flow propagation is filled by the
        # parent class's image-inpainting method.
        current_image_result = super().inpaint_online(
            current_image_result, invert_mask(current_mask_result))

        # Remember this frame as the reference for the next call.
        self.previous_mask = current_mask
        self.previous_image = current_image
        self.previous_mask_result = current_mask_result
        self.previous_image_result = current_image_result
        self.previous_available = True

        return current_image_result
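The helpers used above (flow_model, fill_flow, make_grid, warp_tensor) belong to the project and are not shown here. As an illustration of the warping step, backward warping a tensor with a dense flow field can be sketched with PyTorch's grid_sample; this is an assumption about what warp_tensor does, not the project's actual code:

import torch
import torch.nn.functional as F

def warp_with_flow(tensor: torch.Tensor, flow: torch.Tensor,
                   mode: str = 'nearest') -> torch.Tensor:
    # Backward-warp `tensor` (N, C, H, W) with a dense flow field (N, 2, H, W).
    n, _, h, w = tensor.size()
    # Build a pixel-coordinate grid and shift it by the flow.
    ys, xs = torch.meshgrid(torch.arange(h, device=tensor.device),
                            torch.arange(w, device=tensor.device),
                            indexing='ij')
    grid = torch.stack((xs, ys), dim=0).float().unsqueeze(0) + flow
    # Normalize the sampling coordinates to [-1, 1] as grid_sample expects.
    grid_x = 2.0 * grid[:, 0] / max(w - 1, 1) - 1.0
    grid_y = 2.0 * grid[:, 1] / max(h - 1, 1) - 1.0
    sampling_grid = torch.stack((grid_x, grid_y), dim=3)
    return F.grid_sample(tensor, sampling_grid, mode=mode, align_corners=True)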