示例#1
0
def load_data(batch_size, dataset_name, images_per_model, run_dirs):
    """Collect `images_per_model` flattened images per entry of `run_dirs` into one DataFrame.

    Each element of `run_dirs` is a ``(run_dir, generation)`` pair. When
    ``generation`` is None, real images are read from the dataset named
    `dataset_name`; otherwise images are sampled from the generator
    checkpoint saved for that generation under ``run_dir``.

    Args:
        batch_size: number of images produced/read per iteration.
        dataset_name: dataset identifier written into ``config.gan.dataset``.
        images_per_model: rows to accumulate for each (run_dir, generation).
        run_dirs: iterable of (run_dir, generation) pairs.

    Returns:
        (df, image_shape): ``df`` holds one row per image (flattened pixels
        plus "model", "run_dir", "generation" and "y" columns);
        ``image_shape`` is the shape of a single image tensor.
    """
    df = pd.DataFrame()
    image_shape = None
    noise_data = None
    for run_dir, generation in run_dirs:
        target_size = len(df) + images_per_model
        if generation is None:
            # Real images: stream batches from the training dataset.
            config.gan.dataset = dataset_name
            dataset = GanTrain.create_dataset()
            train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, drop_last=True)
            for images, labels in train_loader:
                image_shape = images.shape[1:]
                df_new = pd.DataFrame(images.numpy().reshape((-1, np.prod(image_shape))))
                df_new["model"] = dataset_name
                df_new["run_dir"] = run_dir
                df_new["generation"] = None
                # 2-D (one-hot) labels carry no single class id -> store zeros.
                df_new["y"] = np.zeros(len(images)) if len(labels.shape) > 1 else labels.numpy()
                # DataFrame.append was removed in pandas 2.0; concat is the replacement.
                df = pd.concat([df, df_new])
                if len(df) >= target_size:
                    break
        else:
            # Generated images: one fixed noise tensor is shared by all models
            # so every generator is evaluated on the same inputs.
            if noise_data is None:
                noise_data = Generator().generate_noise(images_per_model, volatile=True)
                print("noise data created", noise_data.shape)
            last_model = sorted(glob.glob(os.path.join(run_dir, "generations", f"{generation:03}", "generator.pkl")))[
                -1]
            best_generator = tools.cuda(Generator.load(last_model))
            n = 0
            while len(df) < target_size:
                noise = noise_data[n:min(n + batch_size, len(noise_data))]
                n += batch_size
                if len(noise) == 0:
                    # Noise pool exhausted; without this guard the loop would
                    # spin forever feeding empty batches to the generator.
                    break
                images = best_generator(noise).detach().cpu().numpy()
                image_shape = images.shape[1:]
                df_new = pd.DataFrame(images.reshape((-1, np.prod(image_shape))))
                df_new["model"] = f"{run_dir}|{generation}"
                df_new["run_dir"] = run_dir
                df_new["generation"] = generation
                df_new["y"] = np.zeros(len(images))  # generated images have no real label
                df = pd.concat([df, df_new])
                del noise
            # Release GPU memory before moving on to the next checkpoint.
            best_generator = best_generator.cpu()
            torch.cuda.empty_cache()
    print(df.describe())
    return df, image_shape
示例#2
0
def store(path, size, dataset_name="CIFAR10", generator_path=None):
    """Persist `size` samples under ``base_path/path``.

    Samples come from the checkpoint at `generator_path` when one is given,
    otherwise from the training split of `dataset_name`.
    """
    path = os.path.join(base_path, path)
    os.makedirs(path, exist_ok=True)
    if not generator_path:
        # No checkpoint: fall back to real images from the configured dataset.
        util.config.gan.dataset = dataset_name
        source = GanTrain(log_dir="/tmp").train_loader.dataset
    else:
        source = GeneratorDataset(Generator.load(generator_path), size=size)
    store_from_dataset(source, path, size)
示例#3
0
 def generate_intial_population(self):
     """Build the starting generator and discriminator populations.

     Population sizes come from ``config.gan.*.population_size`` and both
     populations share the configured number of desired species.

     NOTE(review): "intial" is a typo for "initial", kept unchanged so
     existing callers keep working.
     """
     species_count = config.evolution.speciation.size

     generators = []
     for _ in range(config.gan.generator.population_size):
         gen = Generator(output_size=self.input_shape)
         gen.setup()
         generators.append(gen)

     discriminators = []
     for _ in range(config.gan.discriminator.population_size):
         # [1] is prepended as the batch dimension.
         disc = Discriminator(output_size=1, input_shape=[1] + list(self.input_shape))
         disc.setup()
         discriminators.append(disc)

     return (Population(generators, desired_species=species_count),
             Population(discriminators, desired_species=species_count))
示例#4
0
    def test_serialization(self):
        """Round-trip both networks through save/load and verify the weights
        (and the generator's outputs) survive serialization."""
        samples = tools.cuda(Variable(torch.randn(5, 100)).view(5, 1, 10, 10))
        sample_shape = samples[0].size()
        discriminator = Discriminator(output_size=1,
                                      input_shape=[1] + list(sample_shape))
        discriminator.setup()
        generator = Generator(output_size=sample_shape)
        generator.setup()

        generator = tools.cuda(generator)
        discriminator = tools.cuda(discriminator)
        discriminator.do_train(generator, samples)
        generator.do_train(discriminator, samples)

        # Discriminator round-trip: state dicts must match exactly.
        discriminator_path = f"{self.test_path}/discriminator.pkl"
        discriminator.save(discriminator_path)
        restored_d = Discriminator.load(discriminator_path)
        self.assert_state_dict_equal(discriminator.state_dict(),
                                     restored_d.state_dict())

        # Generator round-trip.
        generator_path = f"{self.test_path}/generator.pkl"
        generator.save(generator_path)
        restored_g = tools.cuda(Generator.load(generator_path))
        generator = tools.cuda(generator)
        self.assert_state_dict_equal(generator.state_dict(),
                                     restored_g.state_dict())
        # Identical noise must produce identical images from both copies.
        noise = generator.generate_noise(1, volatile=True)
        diff = generator(noise) - restored_g(noise)
        self.assertAlmostEqual(0, diff.sum().item(), 6)
        # After one more training step the original should diverge from the copy.
        generator.do_train(tools.cuda(discriminator), samples)
        self.assertFalse(generator(noise).equal(restored_g(noise)))
示例#5
0
def main(samples=100, output="output"):
    """Sample `samples` images from the generator checkpoint shipped next to
    this script and store them under the `output` directory."""
    checkpoint = os.path.join(os.path.dirname(__file__), "generator.pkl")
    dataset = GeneratorDataset(Generator.load(checkpoint), size=samples)
    os.makedirs(output, exist_ok=True)
    store_from_dataset(dataset, output)
示例#6
0
    # FID evaluation for a generator checkpoint against CIFAR10.
    # Set fid_stat to a cached .npz of precomputed statistics to skip the
    # (slow) initialization pass over the training data.
    fid_stat = ""  #os.path.join(base_path, "fid_stats_cifar10_train.npz")
    util.config.gan.dataset = "CIFAR10"
    logger.info(f"start gan train")
    train = GanTrain(log_dir="/tmp")
    fid_size = 10000

    if fid_stat and os.path.exists(fid_stat):
        # Reuse precomputed mean/covariance of the Inception activations.
        f = np.load(fid_stat)
        m, s = f['mu'][:], f['sigma'][:]
        print("fid_stats_cifar10_train", m, s)
        base_fid_statistics = m, s
        f.close()
        inception_model = build_inception_model()
    else:
        # Compute the reference statistics from the training data loader.
        logger.info(f"init fid")
        initialize_fid(train.train_loader, size=fid_size)
        m, s = base_fid_statistics
        print("calc fid stats", m, s)
        logger.info(f"finish fid")

    images = []
    generator_path = os.path.join(base_path, "./generator.pkl")
    # NOTE(review): generator_path is always a non-empty string here, so the
    # else branch (validation dataset) is unreachable as written — confirm
    # whether an os.path.exists() check was intended.
    if generator_path:
        generator = Generator.load(generator_path)
        dataset = GeneratorDataset(generator, size=fid_size)
    else:
        dataset = train.validation_loader.dataset
    logger.info("start fid %d", fid_size)
    ret = fid_images(dataset, size=fid_size)
    logger.info("FID %s", ret)