Example #1
def sample_latent(self, n_samples=1, seed=None, truncation=None):
    # `truncation` is accepted for interface compatibility but unused here.
    if seed is None:
        # Draw a seed from the (reproducible) global NumPy random state.
        seed = np.random.randint(np.iinfo(np.int32).max)
    noise = zdataset.z_sample_for_model(self.model, n_samples, seed=seed)
    return noise.to(self.device)
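
A hypothetical usage sketch, assuming `sampler` is an instance of the wrapping class above with `model` and `device` already set; a fixed seed makes the call deterministic:

# sampler is a placeholder instance of the class defining sample_latent
z = sampler.sample_latent(n_samples=8, seed=42)
z_again = sampler.sample_latent(n_samples=8, seed=42)
assert torch.allclose(z.cpu(), z_again.cpu())  # same seed, same latents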
Example #2
def main():
    # ganname, modelname, layernum, N, batch_size, and crop_size are
    # module-level globals in the original script.
    expdir = 'results/pgw/%s/%s/layer%d' % (ganname, modelname, layernum)

    if ganname == 'proggan':
        model = setting.load_proggan(modelname).cuda()
        zds = zdataset.z_dataset_for_model(model, size=1000)
        writer = ganrewrite.ProgressiveGanRewriter
    elif ganname == 'stylegan':
        model = load_seq_stylegan(modelname, mconv='seq')
        zds = zdataset.z_dataset_for_model(model, size=1000)
        writer = ganrewrite.SeqStyleGanRewriter
    else:
        raise ValueError('unknown ganname: %s' % ganname)

    model.eval()
    gw = writer(model, zds, layernum, cachedir=expdir)

    images = []
    with torch.no_grad():
        # Over-generate by up to one batch, then truncate to exactly N below.
        for _ in tqdm(range(N // batch_size + 1)):
            # Seeding with len(images) gives each batch a distinct,
            # reproducible seed.
            z = zdataset.z_sample_for_model(model, size=batch_size,
                                            seed=len(images)).cuda()
            samples = gw.sample_image_patch(z, crop_size)
            samples = [s.data.cpu() for s in samples]
            images.extend(samples)
        images = torch.stack(images[:N], dim=0)

    gt_fid = 0  # ground-truth FID is not computed in this script
    fake_fid = compute_fid(images, f'{modelname}_cropped_{images.size(2)}_{ganname}')
    # Images are in [-1, 1]; rescale to [0, 1] before saving a preview grid.
    save_image(images[:32] * 0.5 + 0.5,
               f'patches_{layernum}_{ganname}_{modelname}_{crop_size}.png')

    return fake_fid, gt_fid, images.size(2)
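
Example #2 seeds each batch with the running image count, so successive batches are distinct but reproducible. A stripped-down sketch of that pattern; model, batch_size, and n_batches are placeholders:

collected = []
for _ in range(n_batches):
    # len(collected) grows by batch_size per iteration, so seeds never repeat
    z = zdataset.z_sample_for_model(model, size=batch_size, seed=len(collected))
    collected.extend(z.cpu())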
Example #3
def standard_z_sample(self, size=100, seed=1, device=None):
    '''
    Generate a standard set of random Z as a (size, z_dimension) tensor.
    With the same random seed, it always returns the same z (e.g.,
    the first one is always the same regardless of the size).
    '''
    # Pass the seed through so the docstring's guarantee actually holds.
    result = z_sample_for_model(self.model, size, seed=seed)
    if device is not None:
        result = result.to(device)
    return result
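
Per its docstring, the sampler is prefix-stable. A hypothetical check, assuming `obj` is an instance of the class defining the method above:

z10 = obj.standard_z_sample(size=10, seed=1)
z100 = obj.standard_z_sample(size=100, seed=1)
assert torch.allclose(z10[0], z100[0])  # first z matches regardless of size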
Example #4
def seeded_cropped_sample(g,
                          gw,
                          imgnum,
                          crop_seed,
                          crop_size,
                          act=True,
                          size=None):
    # imgnum doubles as the latent seed, so the same image number always
    # reproduces the same sample; crop_seed fixes the crop location.
    with torch.no_grad():
        z = zdataset.z_sample_for_model(g, size=1, seed=imgnum).cuda()
        return gw.sample_image_patch(z,
                                     crop_size,
                                     seed=crop_seed,
                                     act=act,
                                     size=size)
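
A hedged call sketch; `g`, `gw`, and the sizes are assumptions carried over from the surrounding project:

patch_a = seeded_cropped_sample(g, gw, imgnum=0, crop_seed=7, crop_size=64)
patch_b = seeded_cropped_sample(g, gw, imgnum=0, crop_seed=7, crop_size=64)
# same imgnum -> same latent; same crop_seed -> same crop location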
Example #5
def get_samples(model, name, nimgs=50000, truncated=False):
    batch_size = 10
    g = load_model(model, name, truncated)
    g.eval()

    with torch.no_grad():
        samples = []
        for _ in tqdm(range(nimgs // batch_size + 1)):
            # samples is a list and never None; its length is the running
            # image count and serves as a distinct seed per batch.
            seed = len(samples)
            z = zdataset.z_sample_for_model(g, size=batch_size,
                                            seed=seed).cuda()
            x_real = g(z)
            x_real = [x.detach().cpu() for x in x_real]
            samples.extend(x_real)
        # The loop over-generates by up to one batch; keep exactly nimgs.
        samples = torch.stack(samples[:nimgs], dim=0)
        return samples
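
Because each batch seed is the running sample count, a full run is reproducible end to end. A hypothetical check with placeholder arguments:

a = get_samples(model_type, model_name, nimgs=100)
b = get_samples(model_type, model_name, nimgs=100)
assert torch.equal(a, b)  # identical seeds throughout, identical samples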
Example #6
    def optimize_neurons(self):
        # Set up console output
        verbose_progress(True)

        gan_model = self.generator.model
        annotate_model_shapes(gan_model, gen=True)

        outdir = os.path.join(
            self.args.results, 'dissect',
            self.args.name_checkpoint + '_' + str(time.time()))
        os.makedirs(outdir, exist_ok=True)

        size = 1000

        # Held-out sample (default seed) and a separately seeded training
        # sample, so the two sets of z vectors are distinct.
        sample = z_sample_for_model(gan_model, size)
        train_sample = z_sample_for_model(gan_model, size, seed=2)

        dataset = TensorDataset(sample)
        train_dataset = TensorDataset(train_sample)
        self.cluster_segmenter = ClusterSegmenter(self.model, self.clusters,
                                                  self.mean_clust,
                                                  self.std_clust)

        segrunner = GeneratorSegRunner(self.cluster_segmenter)

        netname = outdir
        # Run dissect
        with torch.no_grad():
            dissect(
                outdir,
                gan_model,
                dataset,
                train_dataset=train_dataset,
                segrunner=segrunner,
                examples_per_unit=20,
                netname=netname,
                quantile_threshold='iqr',
                meta=None,
                make_images=False,
                make_labels=True,
                make_maxiou=False,
                make_covariance=False,
                make_report=True,
                make_row_images=True,
                make_single_images=True,
                batch_size=8,
                num_workers=8,
                rank_all_labels=True)

            sample_ablate = z_sample_for_model(gan_model, 16)

            dataset_ablate = TensorDataset(sample_ablate)
            data_loader = torch.utils.data.DataLoader(dataset_ablate,
                                                      batch_size=8,
                                                      shuffle=False,
                                                      num_workers=8,
                                                      pin_memory=True,
                                                      sampler=None)

            with open(os.path.join(outdir, 'dissect.json')) as f:
                data = EasyDict(json.load(f))
            dissect_layer = {lrec.layer: lrec for lrec in data.layers}

            self.layers_units = {
                'layer2': [],
                'layer3': [],
                'layer4': [],
                'layer5': [],
                'layer6': [],
            }

            # Known noise units; their entries in the index list are
            # overwritten with 0 below so these units are never selected.
            noise_units = np.array([35, 221, 496, 280])

            for i in range(2, len(self.clusters) + 2):
                print('Cluster', i)
                rank_name = 'c_{0}-iou'.format(i)
                for l in range(len(self.layer_list_all)):
                    # Pick out this cluster's IoU ranking for the layer.
                    ranking = next(
                        r
                        for r in dissect_layer[self.layer_list_all[l]].rankings
                        if r.name == rank_name)
                    unit_list = np.array(range(512))
                    unit_list[noise_units] = 0
                    # Reorder unit indices by the ranking scores and record
                    # the per-layer ordering for this cluster.
                    ordering = np.argsort(ranking.score)
                    units_list = unit_list[ordering]
                    self.layers_units[self.layer_list_all[l]].append(
                        units_list)

        # Mark the directory so that it's not done again.
        mark_job_done(outdir)
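
For orientation, a minimal sketch of the dissect.json records consumed above (EasyDict attribute access, names mirroring the snippet; the layer and ranking names are assumptions):

rec = dissect_layer['layer4']                    # one record per probed layer
rank = next(r for r in rec.rankings if r.name == 'c_2-iou')
ordered = np.argsort(rank.score)                 # same ordering the loop records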