Example #1
0
    def get_current_visuals(self, size=512):
        """Collect visualisation images for the current batch.

        Args:
            size: target side length handed to the image / parse-map helpers.

        Returns:
            List of visualisations in order: LR image, SR image, HR image,
            colourised reference mask, colourised HR mask.
        """
        tensors = (self.img_LR, self.img_SR, self.img_HR)
        # Convert each tensor to numpy, then to a displayable image.
        visuals = [
            utils.batch_numpy_to_image(utils.tensor_to_numpy(t), size)
            for t in tensors
        ]
        visuals.append(utils.color_parse_map(self.ref_mask_onehot, size))
        visuals.append(utils.color_parse_map(self.hr_mask, size))
        return visuals
Example #2
0
    def get_current_visuals(self, size=512):
        """Assemble visualisation images for the current batch.

        Args:
            size: target side length handed to the image conversion helper.

        Returns:
            List of five visualisations in order: LR image, SR image,
            colourised predicted parse map, colourised ground-truth parse
            map, HR image.
        """
        # Convert the three image tensors in one pass; unpack in order.
        lr_img, sr_img, hr_img = (
            utils.batch_numpy_to_image(utils.tensor_to_numpy(t), size)
            for t in (self.img_LR, self.img_SR, self.img_HR)
        )
        return [
            lr_img,
            sr_img,
            utils.color_parse_map(self.pred_Parse),
            # gt_Parse lacks a channel dim; unsqueeze(1) adds it for the helper.
            utils.color_parse_map(self.gt_Parse.unsqueeze(1)),
            hr_img,
        ]
Example #3
0
def enhance_faces(LQ_faces, model):
    """Enhance a list of low-quality face crops with the parse + SR networks.

    Args:
        LQ_faces: iterable of HxWxC images (numpy arrays) with values in
            [0, 255] — assumed uint8 face crops; TODO confirm against caller.
        model: object exposing ``device``, ``netP`` (face parser) and
            ``netG`` (SR generator).

    Returns:
        Tuple ``(hq_faces, lq_parse_maps)``: enhanced face images and the
        colourised parse map predicted for each input face.
    """
    restored = []
    parse_visuals = []
    for face in tqdm(LQ_faces):
        with torch.no_grad():
            # HWC uint8 -> CHW float in [-1, 1], plus a leading batch dim.
            inp = torch.tensor(face.transpose(2, 0, 1)) / 255. * 2 - 1
            inp = inp.unsqueeze(0).float().to(model.device)
            parse_logits, _ = model.netP(inp)
            # One-hot over channels: 1 where a channel holds the per-pixel max.
            onehot = (parse_logits == parse_logits.max(dim=1, keepdim=True)[0]).float()
            _, sr_out = model.netG(inp, onehot)
        restored.append(utils.tensor_to_img(sr_out))
        parse_visuals.append(utils.color_parse_map(onehot)[0])
    return restored, parse_visuals
    model.eval()
    max_size = 9999
    os.makedirs(os.path.join(save_dir, 'sr'), exist_ok=True)
    for i, data in tqdm(enumerate(dataset),
                        total=len(dataset) // opt.batch_size):
        inp = data['LR']
        with torch.no_grad():
            parse_map, _ = netP(inp)
            parse_map_sm = (parse_map == parse_map.max(
                dim=1, keepdim=True)[0]).float()
            output_parse, output_SR = netG(inp, parse_map_sm)
        img_path = data['LR_paths']  # get image paths
        for i in tqdm(range(len(img_path))):
            inp_img = utils.batch_tensor_to_img(inp)
            output_sr_img = utils.batch_tensor_to_img(output_SR)
            ref_parse_img = utils.color_parse_map(parse_map_sm)

            save_path = os.path.join(save_dir, 'lq',
                                     img_path[i].split('/')[-1])
            os.makedirs(os.path.join(save_dir, 'lq'), exist_ok=True)
            save_img = Image.fromarray(inp_img[i])
            save_img.save(save_path)

            save_path = os.path.join(save_dir, 'hq',
                                     img_path[i].split('/')[-1])
            os.makedirs(os.path.join(save_dir, 'hq'), exist_ok=True)
            save_img = Image.fromarray(output_sr_img[i])
            save_img.save(save_path)

            save_path = os.path.join(save_dir, 'parse',
                                     img_path[i].split('/')[-1])