    def resume(self, checkpoint_dir, iteration=0, test=False):
        # Load generators
        last_model_name = get_model_list(checkpoint_dir,
                                         "gen",
                                         iteration=iteration)
        self.netG.load_state_dict(torch.load(last_model_name))
        # The checkpoint filename is assumed to end with a zero-padded 8-digit
        # iteration, e.g. gen_00012345.pt, so the slice below recovers that number.
        iteration = int(last_model_name[-11:-3])

        if not test:
            # Load discriminators
            last_model_name = get_model_list(checkpoint_dir,
                                             "dis",
                                             iteration=iteration)
            state_dict = torch.load(last_model_name)
            self.localD.load_state_dict(state_dict['localD'])
            self.globalD.load_state_dict(state_dict['globalD'])
            # Load optimizers
            state_dict = torch.load(
                os.path.join(checkpoint_dir, 'optimizer.pt'))
            self.optimizer_d.load_state_dict(state_dict['dis'])
            self.optimizer_g.load_state_dict(state_dict['gen'])

        print("Resume from {} at iteration {}".format(checkpoint_dir,
                                                      iteration))
        logger.info("Resume from {} at iteration {}".format(
            checkpoint_dir, iteration))

        return iteration
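These resume helpers all rely on get_model_list, which is not included in the snippets. A minimal sketch of what such a helper presumably does, assuming checkpoints are named key_00012345.pt (key, underscore, zero-padded 8-digit iteration, .pt extension), which is also what the last_model_name[-11:-3] slice depends on:

import os

def get_model_list_sketch(dirname, key, iteration=0):
    """Return the newest checkpoint path matching `key`, or the one for a
    specific iteration (hypothetical stand-in for the project's helper)."""
    candidates = sorted(
        os.path.join(dirname, f) for f in os.listdir(dirname)
        if key in f and f.endswith('.pt'))
    if not candidates:
        return None
    if iteration == 0:
        # Zero-padded names make lexicographic order match numeric order.
        return candidates[-1]
    wanted = '{:08d}'.format(iteration)
    for name in candidates:
        if wanted in name:
            return name
    raise ValueError('No "{}" checkpoint found for iteration {}'.format(key, iteration))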
    def resume(self, checkpoint_dir, iteration=0, test=False):
        # Load generators
        last_model_name = get_model_list(checkpoint_dir,
                                         "gen",
                                         iteration=iteration)
        mp_loc = 'cpu' if not self.config['cuda'] else None
        self.netG.load_state_dict(
            torch.load(last_model_name, map_location=mp_loc))
        iteration = int(last_model_name[-11:-3])
        print("last model name is {}".format(last_model_name))
        if not test:
            # Load discriminators
            last_model_name = get_model_list(checkpoint_dir,
                                             "dis",
                                             iteration=iteration)
            # get_model_list returns the full path (filename included) of the requested checkpoint
            state_dict = torch.load(last_model_name, map_location=mp_loc)

            self.localD.load_state_dict(state_dict['localD'])
            self.globalD.load_state_dict(state_dict['globalD'])
            # Load optimizers
            state_dict = torch.load(os.path.join(checkpoint_dir,
                                                 'optimizer.pt'),
                                    map_location=mp_loc)
            self.optimizer_d.load_state_dict(state_dict['dis'])
            self.optimizer_g.load_state_dict(state_dict['gen'])

        print("Resume from {} at iteration {}".format(checkpoint_dir,
                                                      iteration))
        logger.info("Resume from {} at iteration {}".format(
            checkpoint_dir, iteration))

        return iteration
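The keys read during resume ('localD' and 'globalD' for the discriminators, 'gen' and 'dis' for the optimizers) imply a matching save step. A hedged sketch of such a counterpart, written as a standalone function over a trainer-like object and assuming the same gen_%08d.pt / dis_%08d.pt naming; the project's actual save routine may differ:

import os
import torch

def save_checkpoint_sketch(trainer, checkpoint_dir, iteration):
    """Hypothetical counterpart to resume(): writes the files resume() expects."""
    gen_name = os.path.join(checkpoint_dir, 'gen_{:08d}.pt'.format(iteration))
    dis_name = os.path.join(checkpoint_dir, 'dis_{:08d}.pt'.format(iteration))
    opt_name = os.path.join(checkpoint_dir, 'optimizer.pt')
    torch.save(trainer.netG.state_dict(), gen_name)
    torch.save({'localD': trainer.localD.state_dict(),
                'globalD': trainer.globalD.state_dict()}, dis_name)
    torch.save({'gen': trainer.optimizer_g.state_dict(),
                'dis': trainer.optimizer_d.state_dict()}, opt_name)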
Example #3
def loadGenerator(args):
    config = get_config(args.g_config)

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    # Set checkpoint path
    if not args.checkpoint_path:
        checkpoint_path = os.path.join(
            'checkpoints', config['dataset_name'],
            config['mask_type'] + '_' + config['expname'])
    else:
        checkpoint_path = args.checkpoint_path

    # Define the trainer
    netG = Generator(config['netG'], cuda, device_ids)
    if cuda:
        netG = netG.cuda()
    # Resume weight
    last_model_name = get_model_list(checkpoint_path,
                                     "gen",
                                     iteration=args.iter)
    model_iteration = int(last_model_name[-11:-3])
    netG.load_state_dict(
        torch.load(last_model_name,
                   map_location=None if cuda else 'cpu'))

    print("Configuration: {}".format(config))
    print("Resume from {} at iteration {}".format(checkpoint_path,
                                                  model_iteration))

    if cuda:
        netG = nn.parallel.DataParallel(netG, device_ids=device_ids)

    return netG
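A hypothetical call site for loadGenerator. The Namespace fields mirror the attributes the function reads (g_config, checkpoint_path, iter); the paths, the 256x256 input shape, and the mask convention (1 inside the hole) are assumptions for illustration, and the .cuda() calls presume config['cuda'] is enabled:

from argparse import Namespace
import torch

args = Namespace(g_config='configs/config.yaml',   # hypothetical config path
                 checkpoint_path='',                # fall back to the default checkpoint dir
                 iter=0)                            # 0 = latest checkpoint
netG = loadGenerator(args)
netG.eval()

with torch.no_grad():
    # Shapes assume image_shape = [256, 256, 3]; the masked input is expected
    # in the same normalized range used by the other examples.
    x = torch.zeros(1, 3, 256, 256).cuda()
    mask = torch.zeros(1, 1, 256, 256).cuda()
    x1, x2, offset_flow = netG(x, mask)   # coarse result, refined result, offset flow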
Example #4
    def resume(self, checkpoint_dir, iteration=0, test=False):
        # Load generators
        last_model_name = get_model_list(checkpoint_dir,
                                         "parser",
                                         iteration=iteration)
        self.netParser.load_state_dict(torch.load(last_model_name))
        iteration = int(last_model_name[-11:-3])
        '''
        if not test:
            # Load optimizers

            #last_opt_name = torch.load(os.path.join(checkpoint_dir, 'parser_optimizer.pt'))

            opt_state_dict = torch.load(os.path.join(checkpoint_dir, 'parser_optimizer.pt'))
            self.optimizerSGD.load_state_dict(opt_state_dict)
        '''

        print("Resume from {} at iteration {}".format(checkpoint_dir,
                                                      iteration))
        logger.info("Resume from {} at iteration {}".format(
            checkpoint_dir, iteration))

        return iteration
Example #5
def generate(img, img_mask_path, model_path):
    with torch.no_grad():   # enter no grad context
        if img_mask_path and is_image_file(img_mask_path):
            # Test a single masked image with a given mask
            x = Image.fromarray(img)
            mask = default_loader(img_mask_path)
            x = transforms.Resize(config['image_shape'][:-1])(x)
            x = transforms.CenterCrop(config['image_shape'][:-1])(x)
            mask = transforms.Resize(config['image_shape'][:-1])(mask)
            mask = transforms.CenterCrop(config['image_shape'][:-1])(mask)
            x = transforms.ToTensor()(x)
            mask = transforms.ToTensor()(mask)[0].unsqueeze(dim=0)
            x = normalize(x)
            x = x * (1. - mask)
            x = x.unsqueeze(dim=0)
            mask = mask.unsqueeze(dim=0)
        elif img_mask_path:
            raise TypeError("{} is not an image file.".format(img_mask_path))
        else:
            # Test a single ground-truth image with a random mask
            #ground_truth = default_loader(img_path)
            ground_truth = img
            ground_truth = transforms.Resize(config['image_shape'][:-1])(ground_truth)
            ground_truth = transforms.CenterCrop(config['image_shape'][:-1])(ground_truth)
            ground_truth = transforms.ToTensor()(ground_truth)
            ground_truth = normalize(ground_truth)
            ground_truth = ground_truth.unsqueeze(dim=0)
            bboxes = random_bbox(config, batch_size=ground_truth.size(0))
            x, mask = mask_image(ground_truth, bboxes, config)

        # Set checkpoint path
        if not model_path:
            checkpoint_path = os.path.join('checkpoints',
                                           config['dataset_name'],
                                           config['mask_type'] + '_' + config['expname'])
        else:
            checkpoint_path = model_path

        # Define the trainer
        netG = Generator(config['netG'], cuda, device_ids)
        # Resume weight
        last_model_name = get_model_list(checkpoint_path, "gen", iteration=0)
        
        if cuda:
            netG.load_state_dict(torch.load(last_model_name))
        else:
            netG.load_state_dict(torch.load(last_model_name, map_location='cpu'))
                                 
        model_iteration = int(last_model_name[-11:-3])
        print("Resume from {} at iteration {}".format(checkpoint_path, model_iteration))

        if cuda:
            netG = nn.parallel.DataParallel(netG, device_ids=device_ids)
            x = x.cuda()
            mask = mask.cuda()

        # Inference
        x1, x2, offset_flow = netG(x, mask)
        inpainted_result = x2 * mask + x * (1. - mask)
        inpainted_result = from_torch_img_to_numpy(inpainted_result, 'output.png', padding=0, normalize=True)

        return inpainted_result
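A hypothetical way to drive generate() from disk with OpenCV. The file paths are placeholders, and the return value is assumed to be a uint8 RGB array, since from_torch_img_to_numpy is not shown in these snippets:

import cv2

img_bgr = cv2.imread('examples/input.png')           # hypothetical input path
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)   # Image.fromarray() above expects RGB
inpainted = generate(img_rgb,
                     img_mask_path='examples/mask.png',                # hypothetical mask file
                     model_path='checkpoints/places2/hole_benchmark')  # hypothetical checkpoint dir
cv2.imwrite('inpainted.png', cv2.cvtColor(inpainted, cv2.COLOR_RGB2BGR))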
def main():
    args = parser.parse_args()
    config = get_config(args.config)

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    print("Arguments: {}".format(args))

    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random seed: {}".format(args.seed))
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    print("Configuration: {}".format(config))

    try:  # for unexpected error logging
        with torch.no_grad():   # enter no grad context
            if is_image_file(args.image):
                if args.mask and is_image_file(args.mask):
                    # Test a single masked image with a given mask
                    x = default_loader(args.image)
                    mask = default_loader(args.mask)
                    x = transforms.Resize(config['image_shape'][:-1])(x)
                    x = transforms.CenterCrop(config['image_shape'][:-1])(x)
                    mask = transforms.Resize(config['image_shape'][:-1])(mask)
                    mask = transforms.CenterCrop(
                        config['image_shape'][:-1])(mask)
                    x = transforms.ToTensor()(x)
                    mask = transforms.ToTensor()(mask)[0].unsqueeze(dim=0)
                    x = normalize(x)
                    x = x * (1. - mask)
                    x = x.unsqueeze(dim=0)
                    mask = mask.unsqueeze(dim=0)
                elif args.mask:
                    raise TypeError(
                        "{} is not an image file.".format(args.mask))
                else:
                    # Test a single ground-truth image with a random mask
                    ground_truth = default_loader(args.image)
                    ground_truth = transforms.Resize(
                        config['image_shape'][:-1])(ground_truth)
                    ground_truth = transforms.CenterCrop(
                        config['image_shape'][:-1])(ground_truth)
                    ground_truth = transforms.ToTensor()(ground_truth)
                    ground_truth = normalize(ground_truth)
                    ground_truth = ground_truth.unsqueeze(dim=0)
                    bboxes = random_bbox(
                        config, batch_size=ground_truth.size(0))
                    x, mask = mask_image(ground_truth, bboxes, config)

                # Set checkpoint path
                if not args.checkpoint_path:
                    checkpoint_path = os.path.join('checkpoints',
                                                   config['dataset_name'],
                                                   config['mask_type'] + '_' + config['expname'])
                else:
                    checkpoint_path = args.checkpoint_path

                # Define the trainer
                netG = Generator(config['netG'], cuda, device_ids)
                # Resume weight
                last_model_name = get_model_list(
                    checkpoint_path, "gen", iteration=args.iter)
                netG.load_state_dict(torch.load(last_model_name))
                model_iteration = int(last_model_name[-11:-3])
                print("Resume from {} at iteration {}".format(
                    checkpoint_path, model_iteration))

                if cuda:
                    netG = nn.parallel.DataParallel(
                        netG, device_ids=device_ids)
                    x = x.cuda()
                    mask = mask.cuda()

                # Inference
                x1, x2, offset_flow = netG(x, mask)
                inpainted_result = x2 * mask + x * (1. - mask)

                vutils.save_image(inpainted_result, args.output,
                                  padding=0, normalize=True)
                print("Saved the inpainted result to {}".format(args.output))
                if args.flow:
                    vutils.save_image(offset_flow, args.flow,
                                      padding=0, normalize=True)
                    print("Saved offset flow to {}".format(args.flow))
            else:
                raise TypeError("{} is not an image file.".format(args.image))
        # exit no grad context
    except Exception as e:  # for unexpected error logging
        print("Error: {}".format(e))
        raise e
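main() reads a module-level parser that the snippet omits. A minimal reconstruction covering only the flags the function actually touches; the option names match the attribute accesses above, while the defaults are guesses:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='configs/config.yaml')  # hypothetical default
parser.add_argument('--image', type=str, required=True)       # image to inpaint
parser.add_argument('--mask', type=str, default='')           # optional mask image
parser.add_argument('--output', type=str, default='output.png')
parser.add_argument('--flow', type=str, default='')           # optional path for the offset-flow visualization
parser.add_argument('--checkpoint_path', type=str, default='')
parser.add_argument('--iter', type=int, default=0)            # 0 = latest checkpoint
parser.add_argument('--seed', type=int, default=None)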
Example #7
def main():
    args = parser.parse_args()
    config = get_config(args.config)

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random seed: {}".format(args.seed))
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    chunker = ImageChunker(config['image_shape'][0], config['image_shape'][1],
                           args.overlap)
    try:  # for unexpected error logging
        with torch.no_grad():  # enter no grad context
            if is_image_file(args.image):
                print("Loading image...")
                imgs, masks = [], []
                img_ori = default_loader(args.image)
                img_w, img_h = img_ori.size
                # Load mask txt file
                fname = args.image.replace('.jpg', '.txt')
                bboxes, _ = load_bbox_txt(fname, img_w, img_h)
                mask_ori = create_mask(bboxes, img_w, img_h)
                chunked_images = chunker.dimension_preprocess(
                    np.array(deepcopy(img_ori)))
                chunked_masks = chunker.dimension_preprocess(
                    np.array(deepcopy(mask_ori)))
                for (x, msk) in zip(chunked_images, chunked_masks):
                    x = transforms.ToTensor()(x)
                    mask = transforms.ToTensor()(msk)[0].unsqueeze(dim=0)
                    # x = normalize(x)
                    x = x * (1. - mask)
                    x = x.unsqueeze(dim=0)
                    mask = mask.unsqueeze(dim=0)
                    imgs.append(x)
                    masks.append(mask)

                # Set checkpoint path
                if not args.checkpoint_path:
                    checkpoint_path = os.path.join(
                        'checkpoints', config['dataset_name'],
                        config['mask_type'] + '_' + config['expname'])
                else:
                    checkpoint_path = args.checkpoint_path

                # Define the trainer
                netG = Generator(config['netG'], cuda, device_ids)
                # Resume weight
                last_model_name = get_model_list(checkpoint_path,
                                                 "gen",
                                                 iteration=args.iter)
                netG.load_state_dict(torch.load(last_model_name))
                model_iteration = int(last_model_name[-11:-3])
                print("Resume from {} at iteration {}".format(
                    checkpoint_path, model_iteration))

                if cuda:
                    # Wrap the model once; re-wrapping it for every chunk would nest
                    # DataParallel around DataParallel.
                    netG = nn.parallel.DataParallel(
                        netG, device_ids=device_ids)

                pred_imgs = []
                for (x, mask) in zip(imgs, masks):
                    if torch.max(mask) == 1:
                        if cuda:
                            x = x.cuda()
                            mask = mask.cuda()

                        # Inference
                        x1, x2, offset_flow = netG(x, mask)
                        inpainted_result = x2 * mask + x * (1. - mask)
                        inpainted_result = inpainted_result.squeeze(
                            dim=0).permute(1, 2, 0).cpu()
                        pred_imgs.append(inpainted_result.numpy())
                    else:
                        pred_imgs.append(
                            x.squeeze(dim=0).permute(1, 2, 0).numpy())

                pred_imgs = np.asarray(pred_imgs, dtype=np.float32)
                reconstructed_image = chunker.dimension_postprocess(
                    pred_imgs, np.array(img_ori))
                # plt.imshow(reconstructed_image); plt.show()
                reconstructed_image = torch.tensor(
                    reconstructed_image).permute(2, 0, 1).unsqueeze(dim=0)
                vutils.save_image(reconstructed_image,
                                  args.output,
                                  padding=0,
                                  normalize=True)
                print("Saved the inpainted result to {}".format(args.output))
                if args.flow:
                    vutils.save_image(offset_flow,
                                      args.flow,
                                      padding=0,
                                      normalize=True)
                    print("Saved offset flow to {}".format(args.flow))
            else:
                raise TypeError("{} is not an image file.".format(args.image))
        # exit no grad context
    except Exception as e:  # for unexpected error logging
        print("Error: {}".format(e))
        raise e
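load_bbox_txt and create_mask are project helpers that are not shown here. A rough sketch of a create_mask-style function, assuming boxes arrive as pixel-space (x1, y1, x2, y2) tuples; the real helper may use a different box format:

import numpy as np

def create_mask_sketch(bboxes, img_w, img_h):
    """Hypothetical helper: single-channel mask that is 255 inside every box,
    so transforms.ToTensor() maps the hole region to 1.0 as the code above expects."""
    mask = np.zeros((img_h, img_w), dtype=np.uint8)
    for (x1, y1, x2, y2) in bboxes:
        x1, y1 = max(0, int(x1)), max(0, int(y1))
        x2, y2 = min(img_w, int(x2)), min(img_h, int(y2))
        mask[y1:y2, x1:x2] = 255
    return mask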
Example #8
def main():
    # if you want to see output images with colored segmentation?
    VIEW_COLORED_SEGMENTATION = False

    # Config file reading
    args = parser.parse_args()
    config = get_config(args.config)

    # ------ CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    if not os.path.exists(config['output_test_dir']):
        os.makedirs(config['output_test_dir'])

    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    # first, read images and pick labels with same name
    # we will train all images from HQ dataset

    # ---------- train and test dataset&loader
    try:  # for unexpected error logging
        # Load the dataset
        print("Inference on dataset: {" + config['dataset_name'] + "}")
        test_dataset = Test_Dataset(
            data_path=config['test_data_path'],
            with_subfolder=config['data_with_subfolder'],
            image_shape=config['image_shape'],
            random_crop=config['random_crop'],
            return_name=True)
        test_loader = torch.utils.data.DataLoader(
            dataset=test_dataset,
            batch_size=config['batch_size'],
            shuffle=False,
            num_workers=config['num_workers'])

        # [Trainer] (in test, not use trainer class directly)
        netG = Parser(config, cuda, device_ids)

        # Get the resume iteration to restart training
        #================== <LOAD CHECKPOINT FILE starting with parser*.pt> ============================
        last_checkpoint_file = get_model_list(config['resume'], "parser",
                                              config['resume_iter'])
        netG.load_state_dict(torch.load(last_checkpoint_file))
        print("Resume from {}".format(config['resume']))

        # CUDA AVAILABLE
        if cuda:
            netG = nn.parallel.DataParallel(netG, device_ids=device_ids)

        # connect loaders to iter()
        iterable_test_loader = iter(test_loader)

        # learing rate
        #lr = config['lr']

        print('Inference Start.........')
        start_iteration = 0

        # =============== TEST ===================
        for iteration in range(start_iteration, config['niter'] + 1):
            print('ITERATION {}..... [{}/{}]'.format(
                iteration, iteration * config['batch_size'],
                len(test_dataset.samples)))
            try:
                test_img_names, test_orig_images = next(iterable_test_loader)
            except StopIteration:
                iterable_test_loader = iter(test_loader)
                test_img_names, test_orig_images = next(iterable_test_loader)

            if cuda:
                test_orig_images = test_orig_images.cuda()

            # <predict test set>
            test_predict = netG(test_orig_images)

            for test_idx in range(test_orig_images.shape[0]):
                pred_out = torch.argmax(test_predict[test_idx], dim=0)
                test_sam = pred_out.cpu().numpy()

                if VIEW_COLORED_SEGMENTATION:
                    decoded = decode_segmap(test_sam)
                    misc.imsave(
                        config['output_test_dir'] + test_img_names[test_idx] +
                        '.png', decoded)
                else:
                    cv2.imwrite(
                        config['output_test_dir'] + test_img_names[test_idx] +
                        '.png', test_sam)

    except Exception as e:  # for unexpected error logging
        print("Error: {}".format(e))
        raise e
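decode_segmap, used by the VIEW_COLORED_SEGMENTATION branch, is another helper that is not included. A minimal sketch, assuming a fixed palette with one RGB color per class id:

import numpy as np

def decode_segmap_sketch(label_mask, palette=None):
    """Turn an (H, W) array of class ids into an (H, W, 3) uint8 color image (hypothetical helper)."""
    if palette is None:
        # Hypothetical palette: background black, then a few arbitrary colors.
        palette = np.array([[0, 0, 0], [128, 0, 0], [0, 128, 0], [0, 0, 128],
                            [128, 128, 0], [128, 0, 128]], dtype=np.uint8)
    return palette[label_mask]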
Example #9
if args.seed is None:
    args.seed = random.randint(1, 10000)
print("Random seed: {}".format(args.seed))
random.seed(args.seed)
torch.manual_seed(args.seed)
if cuda:
    torch.cuda.manual_seed_all(args.seed)
# print("Configuration: {}".format(config))

# Define the trainer
netG = Generator(config['netG'], cuda, device_ids)
# Resume weight
# if cuda:
#     netG.cuda()
last_model_name = get_model_list(args.checkpoint_path,
                                 "gen",
                                 iteration=args.iter)
# NOTE: the checkpoint chosen by get_model_list above is immediately overridden here.
last_model_name = args.which_model

# last_model_name = args.which_model
# print("loading model from here --------------> {}".format(last_model_name))


# if not cuda:
#     netG.load_state_dict(torch.load(last_model_name, map_location='cpu'))
# else:
#     netG.load_state_dict(torch.load(last_model_name))
def init_weights(m):
    if type(m) == nn.Linear:
        torch.nn.init.xavier_uniform_(m.weight)  # xavier_uniform (no trailing underscore) is deprecated
        m.bias.data.fill_(0.01)
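init_weights is written for nn.Module.apply, which calls the function on every submodule. A short usage sketch with a throwaway model:

import torch.nn as nn

model = nn.Sequential(nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10))
model.apply(init_weights)   # apply() visits every submodule, so both Linear layers get initialized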
Example #10
def main():
    args = parser.parse_args()
    config = get_config(args.config)

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    # print("Random seed: {}".format(args.seed))
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    t0 = time.time()
    dataset = datasets.LoadImages(args.image)
    chunker = ImageChunker(config['image_shape'][0], 
                           config['image_shape'][1], 
                           args.overlap)
    try:  # for unexpected error logging
        with torch.no_grad():   # enter no grad context
            # Set checkpoint path
            if not args.checkpoint_path:
                checkpoint_path = os.path.join('checkpoints', config['dataset_name'],
                                               config['mask_type'] + '_' + config['expname'])
            else:
                checkpoint_path = args.checkpoint_path
            last_model_name = get_model_list(checkpoint_path, "gen", iteration=args.iter)

            prev_fname = ''
            vid_writer = None
            for fpath, img_ori, vid_cap in dataset :
                imgs, masks = [], []
                if prev_fname == fpath :
                    frame += 1 # increase frame number if still on the same file
                else :
                    frame = 0 # start frame number
                    _, img_h, img_w = img_ori.shape
                    txtfile = pathlib.Path(fpath).with_suffix('.txt') # Load mask txt file
                    txtfile = os.path.join(args.output, str(txtfile).split('/')[-1])
                    bboxes, bframes = [], []  # default when no bbox txt file exists
                    if os.path.exists(txtfile):
                        bboxes, bframes = load_bbox_txt(txtfile, img_w, img_h)
                    assert len(bboxes) == len(bframes)

                idx = [ii for ii, val in enumerate(bframes) if val==frame]
                bndbxs = [bboxes[ii] for ii in idx]
                img_ori = np.moveaxis(img_ori, 0, -1)
                if len(bndbxs) > 0 : # if any logo detected
                    mask_ori = create_mask(bndbxs, img_w, img_h)
                    # fig, axes = plt.subplots(1,2); axes[0].imshow(img_ori[0]); axes[1].imshow(mask_ori); plt.show()
                    chunked_images = chunker.dimension_preprocess(np.array(deepcopy(img_ori)))
                    chunked_masks = chunker.dimension_preprocess(np.array(deepcopy(mask_ori)))
                    for (x, msk) in zip(chunked_images, chunked_masks) :
                        x = transforms.ToTensor()(x)
                        mask = transforms.ToTensor()(msk)[0].unsqueeze(dim=0)
                        # x = normalize(x)
                        x = x * (1. - mask)
                        x = x.unsqueeze(dim=0)
                        mask = mask.unsqueeze(dim=0)
                        imgs.append(x)
                        masks.append(mask)

                    # Define the trainer
                    netG = Generator(config['netG'], cuda, device_ids)
                    netG.load_state_dict(torch.load(last_model_name))
                    model_iteration = int(last_model_name[-11:-3])
                    # print("Resume from {} at iteration {}".format(checkpoint_path, model_iteration))
                    if cuda:
                        # Wrap once per file; re-wrapping inside the chunk loop would
                        # nest DataParallel around DataParallel.
                        netG = nn.parallel.DataParallel(netG, device_ids=device_ids)

                    pred_imgs = []
                    for (x, mask) in zip(imgs, masks):
                        if torch.max(mask) == 1:
                            if cuda:
                                x = x.cuda()
                                mask = mask.cuda()

                            # Inference
                            x1, x2, offset_flow = netG(x, mask)
                            inpainted_result = x2 * mask + x * (1. - mask)
                            inpainted_result = inpainted_result.squeeze(dim=0).permute(1,2,0).cpu()
                            pred_imgs.append(inpainted_result.numpy())
                        else :
                            pred_imgs.append(x.squeeze(dim=0).permute(1,2,0).numpy())

                    pred_imgs = np.asarray(pred_imgs, dtype=np.float32)
                    reconstructed_image = chunker.dimension_postprocess(pred_imgs, np.array(img_ori))
                    reconstructed_image = np.uint8(reconstructed_image[:, :, ::-1]*255) # BGR to RGB, and rescaling
                else : # no logo detected
                    reconstructed_image = img_ori[:, :, ::-1]

                # Save results (image with detections)
                outname = fpath.split('/')[-1]
                outname = outname.split('.')[0] + '-inp.' + outname.split('.')[-1]
                outpath = os.path.join(args.output, outname)
                if dataset.mode == 'images':
                    cv2.imwrite(outpath, reconstructed_image)
                    print("Saved the inpainted image to {}".format(outpath))
                else :
                    if fpath != prev_fname:  # new video
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                            print("Saved the inpainted video to {}".format(outpath))

                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(outpath, cv2.VideoWriter_fourcc(*args.fourcc), fps, (w, h))
                    vid_writer.write(reconstructed_image)
                    prev_fname = fpath                
    # exit no grad context
    except Exception as err:  # for unexpected error logging
        print("Error: {}".format(err))
        pass
    print('Inpainting: (%.3fs)' % (time.time() - t0))