Example #1
    def perform_eval(self):
        fid_fake_path = osp.join(self.env['base_path'], "..", "sr_fid",
                                 str(self.env["step"]))
        os.makedirs(fid_fake_path, exist_ok=True)
        counter = 0
        for batch in tqdm(self.dataloader):
            lq = batch['lq'].to(self.env['device'])
            gen = self.model(lq)
            if not isinstance(gen, list) and not isinstance(gen, tuple):
                gen = [gen]
            gen = gen[self.gen_output_index]

            # Remove low-frequency differences
            gen_lf = interpolate(interpolate(gen,
                                             scale_factor=1 / self.scale,
                                             mode="area"),
                                 scale_factor=self.scale,
                                 mode="nearest")
            gen_hf = gen - gen_lf
            hq_lf = interpolate(lq, scale_factor=self.scale, mode="nearest")
            hq_gen_hf_applied = hq_lf + gen_hf
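            # The saved image combines the nearest-upsampled LQ input with the
            # generator's high-frequency residual, so the FID below reflects
            # mainly the high-frequency detail the model adds.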

            for b in range(self.batch_sz):
                torchvision.utils.save_image(
                    hq_gen_hf_applied[b],
                    osp.join(fid_fake_path, "%i_.png" % (counter)))
                counter += 1

        return {
            "sr_fid":
            fid_score.calculate_fid_given_paths(
                [self.fid_real_samples, fid_fake_path], self.batch_sz, True,
                2048)
        }
Example #2
def calculate_fid(generator, nz, data, batch_size, cuda=True):
    if data == 'cifar10':
        fid_stats_path = './fid_stats_cifar10_train.npz'
    elif data == 'celebA':
        fid_stats_path = './fid_stats_celeba.npz'

    # Save generated images to disk for the FID calculation. Writing to disk is
    # not strictly necessary; passing the images through Inception directly
    # would save time (see the sketch after this example).
    start_t = time.time()
    generated_images_folder_path = '/home/yoni/Datasets/fid_images'
    number_fid = 5000 // batch_size
    device = torch.device('cuda' if cuda else 'cpu')
    for idx in range(0, number_fid):
        z_fid = torch.randn(batch_size, nz, 1, 1, device=device)
        g_z_fid = generator(z_fid)
        for idx_fid in range(0, batch_size):
            vutils.save_image(tensor=g_z_fid[idx_fid],
                              fp=generated_images_folder_path + '/' + 'img' +
                              str(idx * batch_size + idx_fid) + '.png',
                              nrow=1,
                              padding=0)

    # fid_score = fid.calculate_fid_given_paths(paths=[generated_images_folder_path, fid_stats_path],
    #                                                           inception_path='./inception_model/')
    fid_score = fid_torch.calculate_fid_given_paths(
        paths=[generated_images_folder_path, fid_stats_path],
        batch_size=batch_size,
        dims=2048,
        cuda=cuda)
    finish_t = time.time()
    print('The FID score is {} and was calculated in {} seconds'.format(
        fid_score, finish_t - start_t))
    return fid_score
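
As the comment above notes, round-tripping thousands of PNGs through the filesystem is avoidable. Below is a minimal sketch of the in-memory alternative, assuming pytorch_fid's InceptionV3 wrapper and calculate_frechet_distance are used directly, that the precomputed .npz file stores 'mu' and 'sigma' arrays (as the standard TTUR stats files do), and that the generator outputs images in [0, 1]. The helper name fid_from_generator is hypothetical.

import numpy as np
import torch
from pytorch_fid.inception import InceptionV3
from pytorch_fid.fid_score import calculate_frechet_distance


def fid_from_generator(generator, nz, fid_stats_path, n_samples=5000,
                       batch_size=100, device='cuda'):
    # Extract 2048-dim pool3 features for generated samples without touching disk.
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    inception = InceptionV3([block_idx]).to(device).eval()

    feats = []
    with torch.no_grad():
        for _ in range(n_samples // batch_size):
            z = torch.randn(batch_size, nz, 1, 1, device=device)
            imgs = generator(z)  # assumed in [0, 1]; rescale first if the generator outputs [-1, 1]
            feats.append(inception(imgs)[0].squeeze(-1).squeeze(-1).cpu().numpy())
    feats = np.concatenate(feats, axis=0)

    # Gaussian statistics of the generated features.
    mu_fake = feats.mean(axis=0)
    sigma_fake = np.cov(feats, rowvar=False)

    # Precomputed statistics of the real data ('mu' and 'sigma' keys assumed).
    stats = np.load(fid_stats_path)
    return calculate_frechet_distance(mu_fake, sigma_fake, stats['mu'], stats['sigma'])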
Example #3
    def calculate_fid(self, num_batches):
        torch.cuda.empty_cache()

        real_path = str(self.results_dir / self.name / 'fid_real') + '/'
        fake_path = str(self.results_dir / self.name / 'fid_fake') + '/'

        # remove any existing files used for fid calculation and recreate directories
        rmtree(real_path, ignore_errors=True)
        rmtree(fake_path, ignore_errors=True)
        os.makedirs(real_path)
        os.makedirs(fake_path)

        for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'):
            real_batch = next(self.loader)
            for k in range(real_batch.size(0)):
                torchvision.utils.save_image(real_batch[k, :, :, :], real_path + '{}.png'.format(k + batch_num * self.batch_size))

        # generate a bunch of fake images in results / name / fid_fake
        self.GAN.eval()
        ext = self.image_extension

        latent_dim = self.GAN.latent_dim
        image_size = self.GAN.image_size

        for batch_num in tqdm(range(num_batches), desc='calculating FID - saving generated'):
            # latents and noise
            latents = torch.randn(self.batch_size, latent_dim).cuda(self.rank)

            # moving averages
            generated_images = self.generate_truncated(self.GAN.GE, latents)

            for j in range(generated_images.size(0)):
                torchvision.utils.save_image(generated_images[j, :, :, :], str(Path(fake_path) / f'{str(j + batch_num * self.batch_size)}-ema.{ext}'))

        return fid_score.calculate_fid_given_paths([real_path, fake_path], 256, True, 2048)
Example #4
def test_calculate_fid_given_statistics(mocker, tmp_path, device):
    dim = 2048
    m1, m2 = np.zeros((dim, )), np.ones((dim, ))
    sigma = np.eye(dim)

    def dummy_statistics(path, model, batch_size, dims, device):
        if path.endswith('1'):
            return m1, sigma
        elif path.endswith('2'):
            return m2, sigma
        else:
            raise ValueError

    mocker.patch('pytorch_fid.fid_score.compute_statistics_of_path',
                 side_effect=dummy_statistics)

    dir_names = ['1', '2']
    paths = []
    for name in dir_names:
        path = tmp_path / name
        path.mkdir()
        paths.append(str(path))

    fid_value = fid_score.calculate_fid_given_paths(paths,
                                                    batch_size=dim,
                                                    device=device,
                                                    dims=dim)

    # Given equal covariance, FID is just the squared norm of difference
    assert fid_value == np.sum((m1 - m2)**2)
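
For reference, this expectation follows from the closed-form FID between two Gaussians,

    \mathrm{FID} = \lVert \mu_1 - \mu_2 \rVert_2^2
                 + \operatorname{Tr}\!\left( \Sigma_1 + \Sigma_2 - 2\,(\Sigma_1 \Sigma_2)^{1/2} \right),

which reduces to \lVert \mu_1 - \mu_2 \rVert_2^2 when \Sigma_1 = \Sigma_2 because the trace term cancels; with identity covariances the test therefore compares against np.sum((m1 - m2)**2).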
Example #5
    def perform_eval(self):
        fid_fake_path = osp.join(self.env['base_path'], "../", "fid",
                                 str(self.env["step"]))
        os.makedirs(fid_fake_path, exist_ok=True)
        counter = 0
        self.model.eval()
        for i in range(self.batches_per_eval):
            if self.noise_type == 'imgnoise':
                batch = torch.FloatTensor(self.batch_sz, 3,
                                          self.im_sz, self.im_sz).uniform_(
                                              0., 1.).to(self.env['device'])
            elif self.noise_type == 'stylenoise':
                batch = torch.randn(self.batch_sz,
                                    self.latent_dim).to(self.env['device'])
            gen = self.model(batch)
            if not isinstance(gen, list) and not isinstance(gen, tuple):
                gen = [gen]
            gen = gen[self.gen_output_index]
            gen = (gen - self.image_norm_range[0]) / (
                self.image_norm_range[1] - self.image_norm_range[0])
            for b in range(self.batch_sz):
                torchvision.utils.save_image(
                    gen[b], osp.join(fid_fake_path, "%i_.png" % (counter)))
                counter += 1
        self.model.train()

        print("Got all images, computing fid")
        return {
            "fid":
            fid_score.calculate_fid_given_paths(
                [self.fid_real_samples, fid_fake_path], self.batch_sz, True,
                2048)
        }
Example #6
def test(generated_images_dir):
    f = open('score.txt', 'a+')
    # load images
    print("Loading images...")

    input_images, target_images, generated_images, names = load_generated_images(
        generated_images_dir)

    print("Compute inception score...")
    inception_score = get_inception_score(generated_images)
    print("Inception score %s" % inception_score[0])

    print("Compute structured similarity score (SSIM)...")
    structured_score = ssim_score(generated_images, target_images)
    print("SSIM score %s" % structured_score)

    print("Compute FID score...")
    FID_score = fid_score.calculate_fid_given_paths(
        [generated_images, target_images], 1, dims=2048)
    print("FID score %s" % FID_score)

    print("Compute LPIPS score...")
    LPIPS_score = lpips_score(generated_images, target_images)
    print("LPIPS score %s" % LPIPS_score)

    msg = "Inception score = %s; SSIM score = %s; FID score = %s; LPIPS score = %s" % (
        inception_score, structured_score, FID_score, LPIPS_score)
    print(msg)
    f.writelines('\nTarget image dir %s\n' % generated_images_dir)
    f.writelines("%s\n\n" % msg)
    f.close()
Example #7
def validate(args, fid_stat, gen_net: nn.Module, writer_dict, valid_loader):

    writer = writer_dict['writer']
    global_steps = writer_dict['valid_global_steps']

    fid_buffer_dir_gen = os.path.join(args.path_helper['sample_path'],
                                      'fid_buffer_gen')
    os.makedirs(fid_buffer_dir_gen, exist_ok=True)

    for iter_idx, (real_imgs, _) in enumerate(tqdm(valid_loader)):
        z = torch.cuda.FloatTensor(
            np.random.uniform(-1, 1, (args.eval_batch_size, args.latent_dim)))

        gen_imgs = gen_net(z)

        for img_idx, img in enumerate(gen_imgs):
            file_name = os.path.join(fid_buffer_dir_gen,
                                     f'iter{iter_idx}_b{img_idx}.png')
            save_image(img, file_name, normalize=True)

    logger.info('=> calculate fid score')
    fid_score = calculate_fid_given_paths(paths=[fid_stat, fid_buffer_dir_gen],
                                          batch_size=args.eval_batch_size)
    print(f"FID score: {fid_score}")

    os.system('rm -r {}'.format(fid_buffer_dir_gen))

    writer.add_scalar('FID_score', fid_score, global_steps)
    writer_dict['valid_global_steps'] = global_steps + 1

    return fid_score
Example #8
def validate(
    output_path: Path,
    generator: Generator,
    latent_dim: int,
    device: torch.device,
):
    """Saves a grid of generated digits ranging from 0 to n_classes."""
    # TODO: cleanup.
    n_samples = 20
    output_path /= "val"
    output_path.mkdir(exist_ok=True, parents=True)
    # Sample noise for each image.
    z = torch.randn((n_samples, latent_dim), device=device)
    # Get labels ranging from 0 to n_classes for n_rows.
    labels = torch.randint(0, 5, (n_samples, ), device=device)
    with torch.no_grad():
        gen_imgs = generator(z, labels)

    coloured_val = colour_labels(gen_imgs)

    for i in range(coloured_val.shape[0]):
        save_image(coloured_val[i, :, :, :], output_path / "{}.png".format(i))

    input_path = Path("data") / "colour" / "fgadr" / "label"
    paths = [str(input_path), str(output_path)]

    fid_score = calculate_fid_given_paths(paths, n_samples, device, 2048)

    return fid_score
Example #9
def fid(paths, batch_size=50, device=None, dims=2048):
    """Calculate FID (Fréchet Inception Distance).

    Ref:
    GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash
    Equilibrium. In NeurIPS 2017. <https://arxiv.org/pdf/1706.08500.pdf>

    Args:
        paths (list of two str): Two paths to the real and generated images.
        batch_size (int): Batch size. A reasonable batch size depends on the
            hardware. Default: 50.
        device (str): Device to use, like 'cuda', 'cuda:0' or 'cpu'.
            Default: None (if set to None, it depends the availability).
        dims (int): Dimensionality of Inception features. Default: 2048.

    Returns:
        float: fid result.
    """

    assert len(paths) == 2, ('Two valid image paths should be given, '
                             f'but got {len(paths)} paths')

    if device is None:
        device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')
    else:
        device = torch.device(device)

    return fid_score.calculate_fid_given_paths(paths=paths,
                                               batch_size=batch_size,
                                               device=device,
                                               dims=dims)
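
A minimal usage sketch for the wrapper above; the directory names are hypothetical placeholders:

real_dir = 'data/real_images'      # directory of real images
fake_dir = 'results/generated'     # directory of generated images
score = fid([real_dir, fake_dir], batch_size=50, device='cuda', dims=2048)
print(f'FID: {score:.2f}')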
Example #10
    def fid_step(self, fid_dir):
        print('entered fid:', self.global_rank)
        N = 5000
        bs = 16
        sizes = [bs] * (N // bs) + [N % bs]

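        # Temporarily swap the EMA weights (the G_ema_{i} buffers) into G for
        # sampling; the originals are restored from `backup` after the loop.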
        backup = copy.deepcopy([p.data for p in self.G.parameters()])
        for i, G_param in enumerate(self.G.parameters()):
            G_param.data.copy_(getattr(self, f'G_ema_{i}'))

        cnt = 0
        for sz in tqdm(sizes):
            z = torch.randn(sz, self.cfg.model.nz).to(self.device)
            xf_ema = self.G(z).detach().cpu().numpy()
            if self.global_rank == 0:
                for i in range(sz):
                    cur_image = (((xf_ema[i].transpose(1, 2, 0) + 1) / 2.0) *
                                 255.0).astype('uint8')
                    cv2.imwrite(os.path.join(fid_dir, f'{cnt}.png'), cur_image)
                    cnt += 1

        for G_param, backup_param in zip(self.G.parameters(), backup):
            G_param.data.copy_(backup_param)

        if self.global_rank == 0:
            data_dir = self.cfg.data.dir
            paths = [
                os.path.join(data_dir,
                             list(os.walk(data_dir))[0][1][0]), fid_dir
            ]
            fid = fid_score.calculate_fid_given_paths(paths, 16, self.device,
                                                      2048)
            print('FID logged')
            self.logger.experiment.add_scalar("FID", fid, self.global_step)
        print('finished fid:', self.global_rank)
Example #11
def run_test(dict_states):
    all_types = [ROOM_CLASS[k] for k in ROOM_CLASS]
    all_states = [[
        all_types[l] for l in range(len(all_types))
        if dict_states['var_{}_{}'.format(k, l)]
    ] for k in range(10)]
    print(all_states)
    os.makedirs('./FID/test/opt/', exist_ok=True)

    # compute FID for a given sequence
    for i, sample in enumerate(fp_loader):
        if i == 100:
            break
        mks, nds, eds, _, _ = sample
        real_nodes = np.where(nds.detach().cpu() == 1)[-1]

        #### FIX PER ROOM TYPE #####
        # generate final layout initialization
        for j in range(1):
            prev_fixed_nodes_state = []
            curr_fixed_nodes_state = []
            curr_gen_mks = gen_state(curr_fixed_nodes_state,
                                     prev_fixed_nodes_state,
                                     sample,
                                     initial_state=True)

            # generate per room type
            for _types in all_states:
                if len(_types) > 0:
                    curr_fixed_nodes_state = np.concatenate(
                        [np.where(real_nodes == _t)[0] for _t in _types])
                else:
                    curr_fixed_nodes_state = np.array([])
                curr_gen_mks = gen_state(curr_fixed_nodes_state,
                                         prev_fixed_nodes_state,
                                         sample,
                                         initial_state=False)
                prev_fixed_nodes_state = list(curr_fixed_nodes_state)

            # save final floorplans
            imk = draw_masks(curr_gen_mks.copy(), real_nodes)
            imk = torch.tensor(np.array(imk).transpose((2, 0, 1))) / 255.0
            save_image(imk,
                       './FID/test/opt/{}_{}.png'.format(i, j),
                       nrow=1,
                       normalize=False)

    # write current results
    fid_value = calculate_fid_given_paths(['./FID/gt/', './FID/test/opt/'], 2,
                                          'cpu', 2048)
    out_str = "curr trial {} {}".format(' '.join(map(str, all_states)),
                                        fid_value)
    print(out_str)
    with open('./FID/opt_results.txt', 'a') as f:
        f.write("{}\n".format(out_str))
    return fid_value
Example #12
    def test(self, generator, test_csv):
        test_files = pd.read_csv(test_csv)
        avg_fid = 0
        avg_psnr = 0
        for k in range(len(test_files)):
            img = Image.open(test_files.iloc[k, 0])
            img_hr_array = img_as_float(np.array(img))
            img_hr_wd = as_windows(img_hr_array, self.step, self.patch_size)

            img_lr = img.resize((int(img.size[1] / self.up_scale),
                                 int(img.size[0] / self.up_scale)))
            img_lr = img_lr.resize(img.size, Image.BILINEAR)
            img_lr = img_lr.filter(
                ImageFilter.GaussianBlur(radius=((self.up_scale - 1) / 2)))
            img_lr_array = img_as_float(np.array(img_lr))
            img_lr_wd = as_windows(img_lr_array, self.step, self.patch_size)

            with open(
                    os.path.join(self.result_dir,
                                 'temp_patch/TileConfiguration.txt'),
                    'w') as text_file:
                print('dim = {}'.format(2), file=text_file)
                with torch.no_grad():
                    generator.eval()
                    for i in range(0, img_lr_wd.shape[1]):
                        for j in range(0, img_lr_wd.shape[0]):
                            target = img_hr_wd[j, i]
                            patch = img_lr_wd[j, i].transpose(
                                (2, 0, 1))[None, :]
                            patch_tensor = torch.from_numpy(patch).float().to(
                                self.device)
                            prediction = generator(patch_tensor)
                            io.imsave(
                                'output/temp_patch/{}_{}.png'.format(j, i),
                                img_as_ubyte(
                                    np.clip(prediction.cpu().numpy()[0], 0,
                                            1)))
                            io.imsave(
                                'output/temp_patch_target/{}_{}.png'.format(
                                    j, i), img_as_ubyte(target))
                            print('{}_{}.png; ; ({}, {})'.format(
                                j, i, i * self.step, j * self.step),
                                  file=text_file)
            fid = fid_score.calculate_fid_given_paths(
                (os.path.join(self.result_dir, 'output/temp_patch'),
                 os.path.join(self.result_dir, 'output/temp_patch_target')), 8,
                self.device, 2048)
            avg_fid = avg_fid + fid

            psnr = p_snr('output/temp_patch', 'output/temp_patch_target')
            avg_psnr = avg_psnr + psnr
            psnr = avg_psnr / len(test_files)

        fid = avg_fid / len(test_files)
        return fid, psnr
Example #13
    def calc_fid(self, no_images: int = 128):
        """Use pytorch_fid to calculate fid value for real and generated images"""

        real_path = "/content/gdrive/MyDrive/FrogGAN/tmp/tmp_reals"
        fake_path = "/content/gdrive/MyDrive/FrogGAN/tmp/tmp_fakes"
        self.create_tmp_images(path=fake_path, no_images=no_images)
        self._create_tmp_reals(path=real_path, no_images=no_images)

        return calculate_fid_given_paths(
            [real_path, fake_path], batch_size=no_images, device=self.device, dims=2048
        )
Example #14
def fid(model_path, params, num=1000, device=None):
    # Generate the reals and fakes.
    gen = torch.load(model_path, map_location=device)["generator"]
    real_dir, fake_dir = _write_reals_and_fakes(FID,
                                                gen,
                                                params,
                                                num=num,
                                                device=device)

    # Compute the FID score using the last layer of the InceptionV3 network.
    print("Computing FID score...")
    return fid_score.calculate_fid_given_paths([real_dir, fake_dir],
                                               batch_size=params["bsize"],
                                               device=device,
                                               dims=2048)
Example #15
    def perform_eval(self):
        # Attempt to make the dataset deterministic.
        self.dataset.reset_random()
        dataloader = DataLoader(self.dataset,
                                self.batch_sz,
                                sampler=self.sampler,
                                num_workers=0)

        fid_fake_path = osp.join(self.env['base_path'], "..", "fid",
                                 str(self.env["step"]))
        os.makedirs(fid_fake_path, exist_ok=True)
        counter = 0
        for batch in tqdm(dataloader):
            batch = {
                k:
                v.to(self.env['device']) if isinstance(v, torch.Tensor) else v
                for k, v in batch.items()
            }
            gen = self.gd(batch)[self.out_key]

            # All gather if we're in distributed mode.
            if torch.distributed.is_available(
            ) and torch.distributed.is_initialized():
                gather_list = [
                    torch.zeros_like(gen)
                    for _ in range(torch.distributed.get_world_size())
                ]
                torch.distributed.all_gather(gather_list, gen)
                gen = torch.cat(gather_list, dim=0)

            if self.env['rank'] <= 0:
                for g in gen:
                    torchvision.utils.save_image(
                        g, osp.join(fid_fake_path, f"{counter}.png"))
                    counter += 1

        if self.env['rank'] <= 0:
            return {
                "fid":
                fid_score.calculate_fid_given_paths(
                    [self.fid_real_samples, fid_fake_path],
                    self.fid_batch_size, True, 2048)
            }
        else:
            return {}
Example #16
    def calculate_fid(self, num_batches):
        from pytorch_fid import fid_score
        torch.cuda.empty_cache()

        real_path = self.fid_dir / 'real'
        fake_path = self.fid_dir / 'fake'

        # remove any existing files used for fid calculation and recreate directories

        if not real_path.exists() or self.clear_fid_cache:
            rmtree(real_path, ignore_errors=True)
            os.makedirs(real_path)

            for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'):
                real_batch = next(self.loader)
                for k, image in enumerate(real_batch.unbind(0)):
                    filename = str(k + batch_num * self.batch_size)
                    torchvision.utils.save_image(image, str(real_path / f'{filename}.png'))

        # generate a bunch of fake images in results / name / fid_fake

        rmtree(fake_path, ignore_errors=True)
        os.makedirs(fake_path)

        self.GAN.eval()
        ext = self.image_extension

        latent_dim = self.GAN.G.latent_dim
        image_size = self.GAN.G.image_size
        num_layers = self.GAN.G.num_layers

        for batch_num in tqdm(range(num_batches), desc='calculating FID - saving generated'):
            # latents and noise
            latents = noise_list(self.batch_size, num_layers, latent_dim, device=self.rank)
            noise = image_noise(self.batch_size, image_size, device=self.rank)

            # moving averages
            generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, noise, trunc_psi = self.trunc_psi)

            for j, image in enumerate(generated_images.unbind(0)):
                torchvision.utils.save_image(image, str(fake_path / f'{str(j + batch_num * self.batch_size)}-ema.{ext}'))

        return fid_score.calculate_fid_given_paths([str(real_path), str(fake_path)], 256, noise.device, 2048)
Example #17
    def perform_eval(self):
        embedding_generator = self.env['generators'][self.embedding_generator]
        fid_fake_path = osp.join(self.env['base_path'], "../../models",
                                 "fid_fake", str(self.env["step"]))
        os.makedirs(fid_fake_path, exist_ok=True)
        fid_real_path = osp.join(self.env['base_path'], "../../models",
                                 "fid_real", str(self.env["step"]))
        os.makedirs(fid_real_path, exist_ok=True)
        counter = 0
        for batch in self.sampler:
            noise = torch.FloatTensor(self.batch_sz,
                                      3, self.im_sz, self.im_sz).uniform_(
                                          0., 1.).to(self.env['device'])
            batch_hq = [e['hq'] for e in batch]
            batch_hq = torch.stack(batch_hq, dim=0).to(self.env['device'])
            resized_batch = torch.nn.functional.interpolate(batch_hq,
                                                            scale_factor=1 /
                                                            self.scale,
                                                            mode="area")
            embedding = embedding_generator(resized_batch)
            gen = self.model(noise, embedding)
            if not isinstance(gen, list) and not isinstance(gen, tuple):
                gen = [gen]
            gen = gen[self.gen_output_index]
            out = gen + torch.nn.functional.interpolate(
                resized_batch, scale_factor=self.scale, mode='bilinear')
            for b in range(self.batch_sz):
                torchvision.utils.save_image(
                    out[b], osp.join(fid_fake_path, "%i_.png" % (counter)))
                torchvision.utils.save_image(
                    batch_hq[b], osp.join(fid_real_path,
                                          "%i_.png" % (counter)))
                counter += 1

        return {
            "fid":
            fid_score.calculate_fid_given_paths([fid_real_path, fid_fake_path],
                                                self.batch_sz, True, 2048)
        }
Example #18
    def perform_eval(self):
        fid_fake_path = osp.join(self.env['base_path'], "..", "fid",
                                 str(self.env["step"]))
        os.makedirs(fid_fake_path, exist_ok=True)
        counter = 0
        for batch in tqdm(self.dataloader):
            lq = batch['lq'].to(self.env['device'])
            gen = self.model(lq)
            if not isinstance(gen, list) and not isinstance(gen, tuple):
                gen = [gen]
            gen = gen[self.gen_output_index]

            for b in range(self.batch_sz):
                torchvision.utils.save_image(
                    gen[b], osp.join(fid_fake_path, "%i_.png" % (counter)))
                counter += 1

        return {
            "fid":
            fid_score.calculate_fid_given_paths(
                [self.fid_real_samples, fid_fake_path], self.batch_sz, True,
                2048)
        }
Example #19
        derain_psnr = cal_psnr(tensor1=out, tensor2=img_gt)
        derain_ssim = cal_ssim(tensor1=out, tensor2=img_gt)
        derain_psnrs.append(derain_psnr)
        derain_ssims.append(derain_ssim)

        print('img:%s, type:%s,  psnr:%.2f,  ssim:%.4f' %
              (path, model_type, derain_psnr, derain_ssim))

        logimg = make_grid(out.data.clamp(0., 1.),
                           nrow=8,
                           normalize=False,
                           scale_each=False)
        save_image(
            logimg,
            os.path.join(opt.out, model_type,
                         '%s_%s.png' % (img_name, model_type)))

    print('MEAN, data:%s, type:%s,  psnr:%.2f,  ssim:%.4f' %
          (opt.data, model_type, np.array(derain_psnrs).mean(),
           np.array(derain_ssims).mean()))

fid = calculate_fid_given_paths(
    paths=[os.path.join(opt.data, 'gt'),
           os.path.join(opt.out, model_type)],
    batch_size=1,
    cuda=True,
    dims=2048,
)
print('FID of %s: %.4f' % (model_type, fid))
Example #20
import argparse
import os

from pytorch_fid.fid_score import calculate_fid_given_paths

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument("--inpainted_dir", type=str, default="places2/images/inpainted/0.2/DF-Net",
                        help='directory of the images to classify')
    parser.add_argument("--original_image_dir", type=str, default="places2/images/original",
                        help='directory of the original images')
    parser.add_argument("--batch_size", type=int, default=50, help='batch size')
    parser.add_argument("--gpu", type=str, default='0', help='GPU id, -1 for CPU')

    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    method = args.inpainted_dir.split('/')[-1]
    mask = float(args.inpainted_dir.split('/')[-2])

    device = 'cpu' if args.gpu == '-1' else 'cuda'
    fid = calculate_fid_given_paths([args.original_image_dir, args.inpainted_dir], args.batch_size, device, 2048)
    print('FID for {}, with {}% mask: {}'.format(method, int(mask * 100), fid))
Example #21
import os
os.environ['OPENBLAS_CORETYPE'] = 'Haswell'

from pytorch_fid.fid_score import calculate_fid_given_paths
fid_value = calculate_fid_given_paths(['./FID/gt/', './FID/test/debug/'], 2,
                                      'cpu', 2048)
print(fid_value)
Example #22
def calculate_FIDs(dataset, fold=1):
    #    dataset='Zurich'
    #    fold=1

    assert dataset in ['Balvan', 'Eliceiri', 'Zurich'
                       ], "dataset must be in ['Balvan', 'Eliceiri', 'Zurich']"
    if dataset == 'Eliceiri':
        dataroot_real = f'./Datasets/{dataset}_patches'
        dataroot_fake = f'./Datasets/{dataset}_patches_fake'
        dataroot_train = f'./Datasets/{dataset}_temp'
    else:
        dataroot_real = f'./Datasets/{dataset}_patches/fold{fold}'
        dataroot_fake = f'./Datasets/{dataset}_patches_fake/fold{fold}'
        dataroot_train = f'./Datasets/{dataset}_temp/fold{fold}'

    gan_names = [
        'train2testA', 'train2testB', 'testA', 'testB', 'B2A', 'cyc_A',
        'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B',
        'comir'
    ]
    #    gan_names = ['cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir', 'B2A']

    # csv information
    header = [
        'Dataset',
        'Fold',
        'Tlevel',
        'GAN_name',
        'Path_fake',
        'Path_real',
        'FID',
    ]
    df = pd.DataFrame(columns=header)

    row_dict = {'Dataset': dataset, 'Fold': fold}

    for tlevel in [
            int(tl[-1]) for tl in glob(f'{dataroot_fake}/patch_tlevel*')
    ]:
        row_dict['Tlevel'] = tlevel
        for gan_name in gan_names:
            row_dict['GAN_name'] = gan_name
            if gan_name in ['train2testA', 'train2testB']:
                row_dict[
                    'Path_fake'] = f'{dataroot_train}/{gan_name[-1]}/train/'
                row_dict[
                    'Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
            elif gan_name in ['testA', 'testB']:
                row_dict[
                    'Path_fake'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
                row_dict[
                    'Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
            elif gan_name == 'comir':
                row_dict[
                    'Path_fake'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}_A/'
                row_dict[
                    'Path_real'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}_B/'
            elif gan_name == 'B2A':
                row_dict[
                    'Path_fake'] = f'{dataroot_real}/patch_tlevel{tlevel}/A/test/'
                row_dict[
                    'Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/B/test/'
            else:
                row_dict[
                    'Path_fake'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}/'
                row_dict[
                    'Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
            row_dict['FID'] = fid_score.calculate_fid_given_paths(
                [row_dict['Path_fake'], row_dict['Path_real']], batch_size,
                device, dim)

            df = df.append(row_dict, ignore_index=True)

    result_dir = dataroot_fake
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    df.to_csv(f'{result_dir}/FIDs.csv')

    return
Example #23
def test(args, generator, test_csv, stitching=False):
    print('loading ImageJ, please wait')
    ij = imagej.init('fiji/fiji/Fiji.app/')
    shutil.rmtree('output')
    os.makedirs('output', exist_ok=True)
    os.makedirs('output/lr', exist_ok=True)
    os.makedirs('output/hr', exist_ok=True)
    os.makedirs('output/sr', exist_ok=True)
    os.makedirs('output/temp_patch', exist_ok=True)
    os.makedirs('output/temp_patch_target', exist_ok=True)
    os.makedirs('output/temp_channel', exist_ok=True)
    step = 192
    test_files = pd.read_csv(test_csv)
    avg_fid = 0
    avg_psnr = 0
    for k in range(len(test_files)):
        img = Image.open(test_files.iloc[k, 0])
        img_hr_array = img_as_float(np.array(img))
        img_lr = img.resize((int(img.size[1] / args.up_scale),
                             int(img.size[0] / args.up_scale)))
        img_lr = img_lr.resize(img.size, Image.BILINEAR)
        img_lr = img_lr.filter(
            ImageFilter.GaussianBlur(radius=((args.up_scale - 1) / 2)))
        img_lr_array = img_as_float(np.array(img_lr))
        pad_h = int((np.floor(img_lr_array.shape[0] / step) * step +
                     args.patch_size) - img_lr_array.shape[0])
        pad_w = int((np.floor(img_lr_array.shape[1] / step) * step +
                     args.patch_size) - img_lr_array.shape[1])
        img_lr_array_padded = pad(img_lr_array,
                                  ((0, pad_h), (0, pad_w), (0, 0)),
                                  mode='reflect')
        img_lr_wd = view_as_windows(img_lr_array_padded,
                                    (args.patch_size, args.patch_size, 3),
                                    step=step)
        img_lr_wd = np.squeeze(img_lr_wd)
        img_hr_array_padded = pad(img_hr_array,
                                  ((0, pad_h), (0, pad_w), (0, 0)),
                                  mode='reflect')
        img_hr_wd = view_as_windows(img_hr_array_padded,
                                    (args.patch_size, args.patch_size, 3),
                                    step=step)
        img_hr_wd = np.squeeze(img_hr_wd)
        with open('output/temp_patch/TileConfiguration.txt', 'w') as text_file:
            print('dim = {}'.format(2), file=text_file)
            with torch.no_grad():
                generator.eval()
                for i in range(0, img_lr_wd.shape[1]):
                    for j in range(0, img_lr_wd.shape[0]):
                        target = img_hr_wd[j, i]
                        patch = img_lr_wd[j, i].transpose((2, 0, 1))[None, :]
                        patch_tensor = torch.from_numpy(patch).float().cuda()
                        prediction = generator(patch_tensor)
                        io.imsave(
                            'output/temp_patch/{}_{}.tiff'.format(j, i),
                            img_as_ubyte(
                                np.clip(prediction.cpu().numpy()[0], 0, 1)))
                        io.imsave(
                            'output/temp_patch_target/{}_{}.tiff'.format(j, i),
                            img_as_ubyte(target))
                        print('{}_{}.tiff; ; ({}, {})'.format(
                            j, i, i * step, j * step),
                              file=text_file)
        fid = fid_score.calculate_fid_given_paths(
            ('output/temp_patch', 'output/temp_patch_target'), 8, 'cuda:0',
            2048)
        avg_fid = avg_fid + fid
        if stitching:
            sys.stdout.write('\r{}/{} stitching, please wait...'.format(
                k + 1, len(test_files)))
            params = {
                'type': 'Positions from file',
                'order': 'Defined by TileConfiguration',
                'directory': 'output/temp_patch',
                'layout_file': 'TileConfiguration.txt',
                'fusion_method': 'Linear Blending',
                'regression_threshold': '0.30',
                'max/avg_displacement_threshold': '2.50',
                'absolute_displacement_threshold': '3.50',
                'compute_overlap': False,
                'computation_parameters':
                'Save computation time (but use more RAM)',
                'image_output': 'Write to disk',
                'output_directory': 'output/temp_channel'
            }
            plugin = "Grid/Collection stitching"
            ij.py.run_plugin(plugin, params)
            list_channels = [f for f in os.listdir('output/temp_channel')]
            c1 = io.imread(
                os.path.join('output/temp_channel', list_channels[0]))
            c2 = io.imread(
                os.path.join('output/temp_channel', list_channels[1]))
            c3 = io.imread(
                os.path.join('output/temp_channel', list_channels[2]))
            c1 = c1[:img.size[1], :img.size[0]]
            c2 = c2[:img.size[1], :img.size[0]]
            c3 = c3[:img.size[1], :img.size[0]]
            img_to_save = np.clip(
                np.stack((c1, c2, c3)).transpose((1, 2, 0)), 0, 1)
            io.imsave(
                os.path.join(
                    'output/sr',
                    os.path.basename(test_files.iloc[k, 0]).replace(
                        '.jpg', '.tiff')), img_as_ubyte(img_to_save))
            io.imsave(
                os.path.join(
                    'output/lr',
                    os.path.basename(test_files.iloc[k, 0]).replace(
                        '.jpg', '.tiff')), img_as_ubyte(img_lr_array))
            io.imsave(
                os.path.join(
                    'output/hr',
                    os.path.basename(test_files.iloc[k, 0]).replace(
                        '.jpg', '.tiff')), img_as_ubyte(img))
        else:
            psnr = p_snr('output/temp_patch', 'output/temp_patch_target')
            avg_psnr = avg_psnr + psnr
        if stitching:
            psnr = p_snr('output/sr', 'output/hr')
        else:
            psnr = avg_psnr / len(test_files)
    fid = avg_fid / len(test_files)
    return fid, psnr
Example #24
        #### FIX PER ROOM TYPE #####
        # generate final layout initialization
        for j in range(1):
            prev_fixed_nodes_state = []
            curr_fixed_nodes_state = []
            curr_gen_mks = gen_state(curr_fixed_nodes_state, prev_fixed_nodes_state, sample, initial_state=True)

            # generate per room type
            for _types in all_states:
                if len(_types) > 0:
                    curr_fixed_nodes_state = np.concatenate([np.where(real_nodes == _t)[0] for _t in _types])
                else:
                    curr_fixed_nodes_state = np.array([])
                    
                curr_gen_mks = gen_state(curr_fixed_nodes_state, prev_fixed_nodes_state, sample, initial_state=False)
                prev_fixed_nodes_state = list(curr_fixed_nodes_state)

            # save final floorplans
            imk = draw_masks(curr_gen_mks, real_nodes)
            imk = torch.tensor(np.array(imk).transpose((2, 0, 1)))/255.0
            save_image(imk, './FID/test/debug/{}_{}.png'.format(i, j), nrow=1, normalize=False)

    # write current results
    fid_value = calculate_fid_given_paths(['./FID/gt_small_{}/'.format(target_set), './FID/test/debug/'], 2, 'cpu', 2048)
    FIDS.append(fid_value)
    with open('./FID/results_{}.txt'.format(run), 'w') as f:
        f.write("\n".join([str(f) for f in FIDS]))
print('FID: ', FIDS)


Example #25
def run_test(dict_states):
    
    N_states = [1 for k in range(10)]
    dirpath = Path('./FID/test/opt')
    if dirpath.exists() and dirpath.is_dir():
        shutil.rmtree(dirpath)
    os.makedirs('./FID/test/opt/', exist_ok=True)
    avg_mistakes = []
    # compute FID for a given sequence
    n_valid_houses = 0
    for i, sample in enumerate(fp_loader):
        if i == 1000:
            break
        mks, nds, eds, _, _ = sample
        real_nodes = np.where(nds.detach().cpu()==1)[-1]
        true_graph_obj = draw_graph([real_nodes, eds.detach().cpu().numpy()])
        
        #### FIX PER ROOM TYPE #####
        # generate final layout initialization
        for j in range(1):
            prev_fixed_nodes_state = []
            curr_fixed_nodes_state = []
            curr_gen_mks = gen_state(curr_fixed_nodes_state, prev_fixed_nodes_state, sample, initial_state=True, true_graph_obj=true_graph_obj, N=1)
            
            # generate per room type
            for _iter in range(10):
                
#                 all_types = [ROOM_CLASS[k]-1 for k in ROOM_CLASS]
#                 sets = {0:'A', 1:'A', 2:'A', 3:'A', 4:'B', 5:'B', 6:'B', 7:'C', 8:'C', 9:'C'}
#                 s = sets[_iter]
#                 _, per_type_mistakes = estimate_graph(curr_gen_mks.copy(), real_nodes, true_graph_obj, per_node=True)
#                 _types = [_t for _t in all_types if (_t not in per_type_mistakes.keys()) and (random.uniform(0, 1) < dict_states['var_v_{}_{}'.format(s, _t)])] # add all without mistakes with some probability
#                 _types += [_t for _t in all_types if (_t in per_type_mistakes.keys()) and (random.uniform(0, 1) < dict_states['var_i_{}_{}'.format(s, _t)])] # add all with mistakes with some probability
                                
                _, per_type_mistakes = estimate_graph(curr_gen_mks.copy(), real_nodes, true_graph_obj, per_node=True)
                _types = [_t for _t in all_types if (_t not in per_type_mistakes.keys()) and (_iter >= dict_states['var_v_{}'.format(_t)])] # add valid after k-th iteration
                _types += [_t for _t in all_types if (_t in per_type_mistakes.keys()) and (_iter >= dict_states['var_i_{}'.format(_t)])] # add invalid after k-th iteration
                
                if len(_types) > 0:
                    curr_fixed_nodes_state = np.concatenate([np.where(real_nodes == _t)[0] for _t in _types])
                else:
                    curr_fixed_nodes_state = np.array([])
                curr_gen_mks = gen_state(curr_fixed_nodes_state, prev_fixed_nodes_state, sample, initial_state=False, true_graph_obj=true_graph_obj, N=1)
                prev_fixed_nodes_state = list(curr_fixed_nodes_state)

            mistakes = estimate_graph(curr_gen_mks.copy(), real_nodes, true_graph_obj)
            avg_mistakes.append(mistakes)
            if mistakes == 0:
                n_valid_houses += 1
            imk = draw_masks(curr_gen_mks.copy(), real_nodes)
            imk = torch.tensor(np.array(imk).transpose((2, 0, 1)))/255.0
            save_image(imk, './FID/test/opt/{}_{}.png'.format(i, j), nrow=1, normalize=False)
            
            
    fid_value = calculate_fid_given_paths(['./FID/val/', './FID/test/opt/'], 2, 'cpu', 2048)
    ratio_valid_houses = 1.0-n_valid_houses/1000.0
#     avg_mistakes = np.mean(fid_value)
    out_str = "all_states {} {}\n".format(str(dict_states), fid_value)
    print(out_str)
    with open('./FID/opt_results_fid_dynamic.txt', 'a') as f:
        f.write("{}\n".format(out_str))
    return fid_value