    def generate_batch_translation_dataset(self, batch_size, truncation=0.4):
        # NOTE: N, A and B come from the enclosing module (not shown here).
        # The indexing below implies N = 4 image groups (A, B, A->B, B->A),
        # with A/B assumed to be index lists over each half of the combined
        # batch, e.g. A = list(range(batch_size)), B = list(range(batch_size, 2 * batch_size)).
        # generate noise -> BxN(2)x128
        noise_A = truncated_noise_sample(batch_size=batch_size,
                                         truncation=truncation)
        noise_B = truncated_noise_sample(batch_size=batch_size,
                                         truncation=truncation)
        n_A = torch.from_numpy(noise_A).unsqueeze(0)
        n_B = torch.from_numpy(noise_B).unsqueeze(0)
        n_AB = torch.cat([n_A, n_B], dim=0).to(self.device)
        n_AB = n_AB.view((N // 2) * batch_size, -1)  # (B*2)x128

        # generate class -> BxN(2)x1000
        idx_list = range(len(self.class_list))
        idx_ab = [random.sample(idx_list, 2)
                  for i in range(batch_size)]  # 2 pair idx
        class_ab = [[
            self.class_list[idx_ab[i][0]], self.class_list[idx_ab[i][1]]
        ] for i in range(len(idx_ab))]
        class_ab = np.array(class_ab).transpose()
        class_A = one_hot_from_int(class_ab[0],
                                   batch_size=batch_size)  # Bx1000 numpy array
        class_B = one_hot_from_int(class_ab[1], batch_size=batch_size)
        c_A = torch.from_numpy(class_A).unsqueeze(0)
        c_B = torch.from_numpy(class_B).unsqueeze(0)
        c_AB = torch.cat([c_A, c_B], dim=0).to(self.device)
        c_AB = c_AB.view((N // 2) * batch_size, -1)  # (B*2)x1000

        with torch.no_grad():
            # pair order: (nA,cA), (nB,cB), (nA,cB), (nB,cA)
            # -> the two source images plus both class translations
            output = self.big_gan(n_AB[A + B + A + B], c_AB[A + B + B + A],
                                  truncation)

        # generate class idx
        idx_ab = torch.from_numpy(np.array(idx_ab).transpose())  # 2xB
        return output.view(N, batch_size, 3, output.size(2),
                           output.size(3)), idx_ab.long()

    def generate_batch_morphing_dataset(self,
                                        batch_size,
                                        interp_noise=True,
                                        interp_class=True,
                                        truncation=0.4):

        # generate noise -> BxN(3)x128 (N = 3 here: start, intermediate, end frame)
        noise_A = truncated_noise_sample(batch_size=batch_size,
                                         truncation=truncation)
        noise_B = truncated_noise_sample(
            batch_size=batch_size,
            truncation=truncation) if interp_noise else 0
        n_A = torch.from_numpy(noise_A).expand(N, batch_size, -1)
        n_B = torch.from_numpy(noise_B).expand(
            N, batch_size, -1) if interp_noise else n_A.clone()

        # generate class -> BxN(3)x1000
        idx_list = range(len(self.class_list))
        idx_ab = [random.sample(idx_list, 2)
                  for i in range(batch_size)]  # 2 pair idx
        class_ab = [[
            self.class_list[idx_ab[i][0]], self.class_list[idx_ab[i][1]]
        ] for i in range(len(idx_ab))]
        class_ab = np.array(class_ab).transpose()
        class_A = one_hot_from_int(class_ab[0],
                                   batch_size=batch_size)  # Bx1000 numpy array
        class_B = one_hot_from_int(class_ab[1], batch_size=batch_size)

        c_A = torch.from_numpy(class_A).expand(N, batch_size, -1)
        c_B = torch.from_numpy(class_B).expand(
            N, batch_size, -1) if interp_class else c_A.clone()

        # # sample index for intermediate image
        # idx_0 = random.randint(0, n_frames-i_frames)
        # idx_1 = idx_0 + i_frames - 1
        # idx_i = random.randint(idx_0, idx_1) # sample index for intermediate image, including end points

        # interpolation -> BxN(3)xZ
        # t = torch.linspace(start=0, end=1.0, steps=n_frames)
        # t = t[[idx_0, idx_i, idx_1]].unsqueeze(1).unsqueeze(1).expand(3, batch_size, 1)
        a = torch.cat([
            torch.zeros(1, batch_size, 1),
            torch.rand(1, batch_size, 1),
            torch.ones(1, batch_size, 1)
        ])
        n = self.interp_func(n_A, n_B, a).to(self.device)
        c = self.interp_func(c_A, c_B, a).to(self.device)

        n = n.view(N * batch_size, -1)  # (B*N(3))xZ
        c = c.view(N * batch_size, -1)
        with torch.no_grad():
            output = self.big_gan(n, c, truncation)

        # generate class idx
        idx_ab = torch.from_numpy(np.array(idx_ab).transpose())  # 2xB
        return output.view(N, batch_size, 3, output.size(2),
                           output.size(3)), a[1].squeeze(dim=1), idx_ab.long()
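
Both generators above hinge on the same endpoint-interpolation idea. For reference, a minimal standalone sketch of that idea with pytorch_pretrained_biggan (class indices 207 and 254 are arbitrary ImageNet picks, and the lerp stands in for whatever self.interp_func does):

import torch
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_int,
                                       truncated_noise_sample)

model = BigGAN.from_pretrained('biggan-deep-128')
truncation = 0.4
n_A = torch.from_numpy(truncated_noise_sample(batch_size=1, truncation=truncation))
n_B = torch.from_numpy(truncated_noise_sample(batch_size=1, truncation=truncation))
c_A = torch.from_numpy(one_hot_from_int(207, batch_size=1))  # golden retriever
c_B = torch.from_numpy(one_hot_from_int(254, batch_size=1))  # pug

frames = []
for t in torch.linspace(0.0, 1.0, steps=3):  # start, intermediate, end
    n = (1 - t) * n_A + t * n_B  # lerp in noise space
    c = (1 - t) * c_A + t * c_B  # lerp in one-hot class space
    with torch.no_grad():
        frames.append(model(n, c, truncation))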
Example n. 3
def recover_latent(trial):
    # model, truncation, maxNoise and batches are globals (see sketch below)
    trueZ = truncated_noise_sample(truncation=truncation, batch_size=batches)
    noise = truncated_noise_sample(truncation=maxNoise, batch_size=batches)
    class_vec = torch.from_numpy(
        one_hot_from_names(['fountain'], batch_size=batches))

    z = torch.from_numpy(trueZ + noise)
    z0 = z.detach().clone()  # keep the noisy starting vector for saving
    opt = optim.Adam([z.requires_grad_()])
    lf = nn.MSELoss()

    with torch.no_grad():
        trueZImg = model(torch.from_numpy(trueZ), class_vec, truncation)
    zImg = model(z, class_vec, truncation)

    zImg0 = zImg.detach().clone()

    for i in range(5):
        loss = lf(zImg, trueZImg)
        loss.backward()
        opt.step()
        opt.zero_grad()
        print(i + 1, ': ImageMSE: ', mse_loss(zImg, trueZImg).item(),
              '\tVecMSE: ', mse_loss(z, torch.from_numpy(trueZ)).item())
        zImg = model(z, class_vec, truncation)  # re-render with the updated z

    # Save images
    saveOriginal = 'output/' + str(trial) + '_original'
    saveNoisy = 'output/' + str(trial) + '_noisy'
    saveFixed = 'output/' + str(trial) + '_fixed'
    ensure_dir(saveOriginal)
    save_as_images(trueZImg, saveOriginal)
    ensure_dir(saveNoisy)
    save_as_images(zImg0, saveNoisy)
    ensure_dir(saveFixed)
    save_as_images(zImg.detach(), saveFixed)

    # Save vectors (the latents, not the rendered images)
    saveOriginal = 'output/' + str(trial) + '_originalVec.pt'
    saveNoisy = 'output/' + str(trial) + '_noisyVec.pt'
    saveFixed = 'output/' + str(trial) + '_fixedVec.pt'
    ensure_dir(saveOriginal)
    torch.save(torch.from_numpy(trueZ), saveOriginal)
    ensure_dir(saveNoisy)
    torch.save(z0, saveNoisy)
    ensure_dir(saveFixed)
    torch.save(z.detach(), saveFixed)
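
recover_latent also leans on globals the snippet never defines (model, truncation, maxNoise, batches, ensure_dir). A hedged setup sketch with illustrative values only:

import os
import torch
from torch import nn, optim
from torch.nn.functional import mse_loss
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names,
                                       truncated_noise_sample, save_as_images)

model = BigGAN.from_pretrained('biggan-deep-128')
truncation = 0.4  # illustrative guess
maxNoise = 0.1    # illustrative guess: scale of the perturbation to undo
batches = 1

def ensure_dir(path):
    # assumed helper: create the parent directory of `path` if it is missing
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)

Example n. 4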
    def translate_sentiment(self, audio_sentiment, noise=0.1):

        # Step 1: Find the closest word match to the audio sentiment
        dists, idx = self.database.kneighbors(audio_sentiment)
        song_words = [self.words[cur_index[0]] for cur_index in idx]
        song_words = [x.split(',')[0].lower() for x in song_words]
        word_ids = [self.class_ids[cur_index] for cur_index in idx]

        class_vectors = []
        noise_vectors = []

        # Calculate the weighted nearest class neighbors
        for i in range(len(word_ids)):
            cur_class_vector = one_hot_from_int(word_ids[i],
                                                batch_size=len(word_ids[i]))

            # If k = 1
            if len(word_ids[i]) == 1:
                cur_class_vector = cur_class_vector[0]
            else:
                similarities = np.exp(-dists[i] / self.temperature)
                similarities = similarities / np.sum(similarities)
                similarities = similarities.reshape((-1, 1))
                cur_class_vector = np.sum(cur_class_vector * similarities,
                                          axis=0)

            class_vectors.append(cur_class_vector)
            noise_vectors.append(
                truncated_noise_sample(truncation=noise, batch_size=1)[0])

        class_vectors = np.float32(class_vectors)
        noise_vectors = np.float32(noise_vectors)

        return class_vectors, noise_vectors, song_words
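
The dists/idx pair above comes from a scikit-learn style kneighbors call, so self.database is presumably a fitted NearestNeighbors index over per-word sentiment embeddings. A hedged sketch of that setup (names and shapes inferred from the method, not from the original source):

import numpy as np
from sklearn.neighbors import NearestNeighbors

def build_database(word_embeddings, k=5):
    # word_embeddings: (n_words, d) array aligned with self.words and
    # self.class_ids in the class above; k controls how many neighbors
    # kneighbors() returns per query
    return NearestNeighbors(n_neighbors=k).fit(word_embeddings)

# e.g. in __init__: self.database = build_database(embeddings)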
Example n. 5
def generate_random_morph_sequence(count, silent=False):
    noise_vector = truncated_noise_sample(truncation=0.5, batch_size=1)

    # sample `count` distinct classes without replacement, then close the loop
    choices = list(range(1, 1001))
    nums = []
    for i in range(count):
        num = random.choice(choices)
        choices.remove(num)
        nums.append(num)
    nums.append(nums[0])

    path = "animations/class_%s" % "_".join([str(a) for a in nums])

    if not os.path.exists(path):
        os.makedirs(path)

    frames = []
    for i in range(len(nums) - 1):
        num1 = nums[i]
        num2 = nums[i + 1]
        frames += make_frames(list(range(total_frames + 1)), total_frames,
                              num1, num2, noise_vector, path, i * total_frames)

    if not silent:
        open_file(path)

    convert_to_video(frames)
Example n. 6
def Hess_all_BigGAN_optim(param):
    lr = 10 ** param[0, 0]
    wd = 10 ** param[0, 1]
    beta1 = 1 - 10 ** param[0, 2]  # param[2] = log10(1 - beta1)
    beta2 = 1 - 10 ** param[0, 3]  # param[3] = log10(1 - beta2)
    noise_init = torch.from_numpy(truncated_noise_sample(1, 128)).cuda()
    class_init = 0.06 * torch.randn(1, 128).cuda()
    latent_coef = (torch.cat((noise_init, class_init), dim=1) @ evc_all).detach().clone().requires_grad_(True)
    optim = Adam([latent_coef], lr=lr, weight_decay=wd, betas=(beta1, beta2))
    # torch.optim.lr_scheduler
    scores_all = []
    for step in range(300):
        optim.zero_grad()
        latent_code = latent_coef @ evc_all.T
        noise_vec = latent_code[:, :128]
        class_vec = latent_code[:, 128:]
        fitimg = BGAN.generator(latent_code, 0.7)
        fitimg = torch.clamp((1.0 + fitimg) / 2.0, 0, 1)
        dsim = alpha * ImDist(fitimg, target_tsr) + L1loss(fitimg, target_tsr)  #
        dsim.backward()
        optim.step()
        scores_all.append(dsim.item())
        if (step + 1) % 10 == 0:
            print("step%d loss %.2f norm: cls: %.2f nois: %.1f" % (step, dsim.item(), class_vec.norm(), noise_vec.norm()))

    imcmp = ToPILImage()(make_grid(torch.cat((fitimg, target_tsr)).cpu()))
    # imcmp.show()
    imcmp.save(join(savedir, "Hall%06d_%.3f.jpg" % (np.random.randint(1000000), dsim.item())))

    plt.figure()
    plt.plot(scores_all)
    plt.title("lr %.E wd %.E beta1 %.3f beta2 %.3f"%(lr,wd,beta1,beta2))
    plt.savefig(join(savedir, "traj_Hall%06d_%.3f.jpg" % (np.random.randint(1000000), dsim.item())))
    return dsim.item() if not torch.isnan(dsim) else 1E6
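
Hess_all_BigGAN_optim maps a 1x4 row of log-scale hyperparameters to a scalar loss, which is the signature a black-box hyperparameter search expects; the same pattern drives Hess_sep_BigGAN_optim and optim_BigGAN below. A hedged random-search driver sketch (the bounds are illustrative guesses):

import numpy as np

rng = np.random.default_rng(0)
best_score, best_param = np.inf, None
for _ in range(20):
    # columns: [log10 lr, log10 wd, log10(1 - beta1), log10(1 - beta2)]
    param = rng.uniform(low=[-4.0, -6.0, -2.0, -4.0],
                        high=[-1.0, -2.0, -0.3, -1.0]).reshape(1, 4)
    score = Hess_all_BigGAN_optim(param)
    if score < best_score:
        best_score, best_param = score, param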
Example n. 7
def Hess_sep_BigGAN_optim(param):
    lr1 = 10 ** param[0, 0]
    wd1 = 10 ** param[0, 1]
    lr2 = 10 ** param[0, 2]
    wd2 = 10 ** param[0, 3]
    noise_init = torch.from_numpy(truncated_noise_sample(1, 128)).cuda()
    class_init = 0.06 * torch.randn(1, 128).cuda()
    noise_coef = (noise_init @ evc_nois).detach().clone().requires_grad_(True)
    class_coef = (class_init @ evc_clas).detach().clone().requires_grad_(True)
    optim1 = Adam([noise_coef], lr=lr1, weight_decay=wd1, betas=(0.9, 0.999))
    optim2 = Adam([class_coef], lr=lr2, weight_decay=wd2, betas=(0.9, 0.999))
    # torch.optim.lr_scheduler
    for step in range(300):
        optim1.zero_grad()
        optim2.zero_grad()
        class_vec = class_coef @ evc_clas.T
        noise_vec = noise_coef @ evc_nois.T
        fitimg = BGAN.generator(torch.cat((noise_vec, class_vec), dim=1), 0.7)
        fitimg = torch.clamp((1.0 + fitimg) / 2.0, 0, 1)
        dsim = alpha * ImDist(fitimg, target_tsr) + L1loss(fitimg, target_tsr)  #
        dsim.backward()
        optim1.step()
        optim2.step()
        if (step + 1) % 10 == 0:
            print("step%d loss %.2f norm: cls: %.2f nois: %.1f" % (step, dsim.item(), class_vec.norm(), noise_vec.norm()))

    imcmp = ToPILImage()(make_grid(torch.cat((fitimg, target_tsr)).cpu()))
    imcmp.show()
    imcmp.save(join(savedir, "Hsep%06d_%.3f.jpg" % (np.random.randint(1000000), dsim.item())))
    return dsim.item() if not torch.isnan(dsim) else 1E6
Example n. 8
def generate_images(num_images, class_index, model, truncation: float = 1.):
    print("[info] initializing input variables...")
    batch_size = min(10, num_images)
    class_vec = one_hot_from_int(class_index, batch_size=num_images) * .97
    noise_vec = truncated_noise_sample(truncation=truncation,
                                       batch_size=num_images)

    noise_tensor = torch.tensor(noise_vec, dtype=torch.float)
    class_tensor = torch.tensor(class_vec, dtype=torch.float)

    print('[info] preparing model inference...')
    noise_tensor = noise_tensor.to(device)
    class_tensor = class_tensor.to(device)

    print(
        f'[info] generating {num_images} images of class index {class_index}...'
    )
    images = []
    for i in range(0, num_images, batch_size):
        torch.cuda.empty_cache()
        noise_feed = noise_tensor[i:i + batch_size]
        class_feed = class_tensor[i:i + batch_size]
        with torch.no_grad():
            output = model(noise_feed, class_feed, truncation)
        output_cpu = output.cpu().data.numpy()
        for out in output_cpu:
            image = np.array(toimage(out))
            images.append(image)
    print('[info] done.\n')
    torch.cuda.empty_cache()
    return images
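
toimage here comes from the long-deprecated scipy.misc. Where it is unavailable, a small PIL stand-in along these lines should behave equivalently for BigGAN's CHW output (assuming, as elsewhere in these examples, a [-1, 1] value range):

import numpy as np
from PIL import Image

def toimage(out):
    # CHW float array in [-1, 1] -> HWC uint8 PIL image
    arr = np.uint8(np.clip((out.transpose(1, 2, 0) + 1) / 2.0 * 255, 0, 255))
    return Image.fromarray(arr)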
Example n. 9
def generate(model):
    # Prepare an input
    truncation = 0.4
    batch_size = 10
    class_vector = one_hot_from_names(['vase'], batch_size=batch_size)
    noise_vector = truncated_noise_sample(truncation=truncation, batch_size=batch_size)

    # All in tensors
    noise_vector = torch.from_numpy(noise_vector)
    class_vector = torch.from_numpy(class_vector)

    # If you have a GPU, put everything on cuda
    noise_vector = noise_vector.to('cuda')
    class_vector = class_vector.to('cuda')
    model.to('cuda')

    # Generate an image
    with torch.no_grad():
        output = model(noise_vector, class_vector, truncation)

    # If you have a GPU put back on CPU
    output = output.to('cpu')

    # If you have a sixtel compatible terminal you can display the images in the terminal
    # (see https://github.com/saitoha/libsixel for details)
    # display_in_terminal(output)

    # Save results as png images
    save_as_images(output, file_name='output/output')
Example n. 10
def biggan(inp: List[str], metadata):
    truncation = 0.4
    try:
        class_vector = one_hot_from_names(inp, batch_size=len(inp))
        noise_vector = truncated_noise_sample(truncation=truncation, batch_size=len(inp))
        noise_vector = torch.from_numpy(noise_vector)
        class_vector = torch.from_numpy(class_vector)
        with torch.no_grad():
            output = modelBG(noise_vector, class_vector, truncation)
    except Exception:
        # fall back to a default class if the names can't be mapped to ImageNet
        inp = ['cat']
        class_vector = torch.from_numpy(one_hot_from_names(inp, batch_size=len(inp)))
        noise_vector = torch.from_numpy(truncated_noise_sample(truncation=truncation, batch_size=len(inp)))
        with torch.no_grad():
            output = modelBG(noise_vector, class_vector, truncation)

    return convert_to_images(output)[0]
Example n. 11
    def __init__(self, config):
        super(DeepMindBigGANLatentSpace, self).__init__()
        self.config = config

        self.z = torch.nn.Parameter(
            torch.tensor(truncated_noise_sample(self.config.batch_size)).to(
                self.config.device))
        self.class_labels = torch.nn.Parameter(
            torch.rand(self.config.batch_size,
                       self.config.num_classes).to(self.config.device))
Example n. 12
    def sample_latent(self, batch_size: int, seed: int = None) -> torch.Tensor:
        """Samples random codes from the latent space"""
        if seed is None:
            seed = np.random.randint(np.iinfo(
                np.int32).max)  # use (reproducible) global rand state

        noise_vector = truncated_noise_sample(truncation=self.truncation,
                                              batch_size=batch_size,
                                              seed=seed)
        noise = torch.from_numpy(noise_vector)
        return noise.to(self.device)
Example n. 13
def create_noise_vector(
    batch_size: int = 1,
    dim_z: int = 128,
    truncation: float = 1.0,
    device: str = "cpu",
    seed: Any = None,
):
    # 643 - weird faces
    noise_vector = truncated_noise_sample(batch_size, dim_z, truncation, seed)
    noise_vector = torch.from_numpy(noise_vector)
    return noise_vector.to(device)
Example n. 14
    def get_sample_image(self):
        n = truncated_noise_sample(batch_size=self.batch_size,
                                   truncation=self.truncation)
        c_idx = np.random.choice(self.class_list, size=1)
        c = one_hot_from_int(c_idx, batch_size=self.batch_size)
        # np to torch
        n = torch.from_numpy(n).to(self.device)
        c = torch.from_numpy(c).to(self.device)

        with torch.no_grad():
            output = self.big_gan(n, c, self.truncation)
        return output
Example n. 15
def generate_class(num):
    truncation = 0.5
    batch_size = 1

    for variation in range(1, 6):
        path = "images/class_%d" % num
        filename = "%s/%d.png" % (path, variation)
        if not os.path.exists(filename):
            noise_vector = truncated_noise_sample(truncation=truncation,
                                                  batch_size=batch_size)
            class_vector = get_classvector(num)
            if not os.path.exists(path):
                os.makedirs(path)
            get_image(class_vector, noise_vector, truncation).save(filename)
Example n. 16
def generate_image(dense_class_vector=None,
                   name=None,
                   noise_seed_vector=None,
                   truncation=0.4,
                   gan_model=None,
                   pretrained_gan_model_name='biggan-deep-128'):
    """ Utility function to generate an image (numpy uint8 array) from either:
        - a name (string): converted in an associated ImageNet class and then
            a dense class embedding using BigGAN's internal ImageNet class embeddings.
        - a dense_class_vector (torch.Tensor with 128 elements): used as a replacement of BigGAN internal
            ImageNet class embeddings.
        
        Other args:
            - noise_seed_vector: a vector used to control the seed (seed set to the sum of the vector elements)
            - truncation: a float between 0 and 1 to control image quality/diversity tradeoff (see BigGAN paper)
            - gan_model: a BigGAN model from pytorch_pretrained_biggan library.
                If None a model is instanciated from a pretrained model name given by `pretrained_gan_model_name`
                List of possible names: https://github.com/huggingface/pytorch-pretrained-BigGAN#models
            - pretrained_gan_model_name: shortcut name of the GAN model to instantiate if no gan_model is provided. Default to 'biggan-deep-128'
    """
    seed = int(noise_seed_vector.sum().item()
               ) if noise_seed_vector is not None else None
    noise_vector = truncated_noise_sample(truncation=truncation,
                                          batch_size=1,
                                          seed=seed)
    noise_vector = torch.from_numpy(noise_vector)

    if gan_model is None:
        gan_model = BigGAN.from_pretrained(pretrained_gan_model_name)

    if name is not None:
        class_vector = one_hot_from_names([name], batch_size=1)
        class_vector = torch.from_numpy(class_vector)
        dense_class_vector = gan_model.embeddings(class_vector)
        # input_vector = torch.cat([noise_vector, gan_class_vect.unsqueeze(0)], dim=1)
        # dense_class_vector = torch.matmul(class_vector, gan.embeddings.weight.t())
    else:
        dense_class_vector = dense_class_vector.view(1, 128)

    input_vector = torch.cat([noise_vector, dense_class_vector], dim=1)

    # Generate an image
    with torch.no_grad():
        output = gan_model.generator(input_vector, truncation)
    output = output.cpu().numpy()
    output = output.transpose((0, 2, 3, 1))
    output = ((output + 1.0) / 2.0) * 256
    output.clip(0, 255, out=output)
    output = np.asarray(np.uint8(output[0]), dtype=np.uint8)
    return output
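
A hedged usage sketch for the utility above (the class name and seed vector are arbitrary):

import torch
from PIL import Image

seed_vec = torch.ones(10)  # seed = int(seed_vec.sum()) = 10
img = generate_image(name='mushroom',
                     noise_seed_vector=seed_vec,
                     truncation=0.4)
Image.fromarray(img).save('mushroom.png')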
Example n. 17
    def translate_sentiment(self, audio_sentiment, noise):
        song_words = []

        audio_sentiment = torch.tensor(audio_sentiment).to(config['device'])
        class_vectors, noise_vectors = self.net(audio_sentiment)
        class_vectors = torch.softmax(class_vectors / self.temperature, dim=1)
        cur_noise = torch.tensor(
            truncated_noise_sample(batch_size=len(noise_vectors),
                                   truncation=noise)).to(config['device'])
        noise_vectors = noise_vectors * self.noise_scaler + cur_noise

        # Get the names of the imagenet classes
        class_ids = class_vectors.argmax(1).cpu().detach().numpy()
        for i in class_ids:
            song_words.append(self.imagenet_classes[i])

        class_vectors, noise_vectors = class_vectors.cpu().detach().numpy(
        ), noise_vectors.cpu().detach().numpy()
        return class_vectors, noise_vectors, song_words
Example n. 18
    def generate_ps(self, inp, N):
        truncation = 0.1

        rvs = []
        st = 0
        while st < N:
            ed = min(st+self.batch_size, N)
            class_v = np.random.dirichlet([1]*1000, size=ed-st)
            noise_v = truncated_noise_sample(truncation=truncation, batch_size=ed-st)
            class_v = torch.FloatTensor(class_v)
            noise_v = torch.FloatTensor(noise_v)
            with torch.no_grad():
                output = self.model(noise_v, class_v, truncation)
            output = (output.cpu().numpy() + 1) / 2
            output = output[:, :, 16:-16, 16:-16]  # keep only the 224x224 center crop
            rvs.append(output)
            st = ed
        rvs = np.concatenate(rvs, axis=0)

        return rvs
Example n. 19
def generate_random_morph():
    all = list(range(1, 1001))
    num1 = random.choice(all)
    all.remove(num1)
    num2 = random.choice(all)
    noise_vector = truncated_noise_sample(truncation=0.5, batch_size=1)
    path = "animations/class_%d_%d" % (num1, num2)

    if not os.path.exists(path):
        os.makedirs(path)

    make_frames(
        list(range(total_frames + 1)),
        total_frames,
        num1,
        num2,
        noise_vector,
        path,
    )

    open_file(path)
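
make_frames, total_frames, open_file and convert_to_video live outside this snippet. A hypothetical reconstruction of make_frames, reusing the class-vector interpolation pattern from the other examples (the model global and the frame layout are assumptions, not the original helper):

def make_frames(frame_ids, total_frames, num1, num2, noise_vector, path, offset=0):
    # hypothetical stand-in: lerp the one-hot class vectors frame by frame
    noise = torch.from_numpy(noise_vector)
    saved = []
    for f in frame_ids:
        t = f / total_frames
        class_np = ((1 - t) * one_hot_from_int(num1, batch_size=1)
                    + t * one_hot_from_int(num2, batch_size=1))
        with torch.no_grad():
            out = model(noise, torch.from_numpy(class_np), 0.5)
        frame_path = "%s/%05d.png" % (path, offset + f)
        convert_to_images(out)[0].save(frame_path)
        saved.append(frame_path)
    return saved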
Example n. 20
def generate_data(random_state, batch_size, num_images_per_classes, device,
                  output_path):
    generator_model = BigGAN.from_pretrained("biggan-deep-128",
                                             cache_dir=os.path.join(
                                                 "./data/checkpoint",
                                                 "cached_model"))
    generator_model = generator_model.to(device)

    # prepare an input
    truncation = 0.4
    op_paths.build_dirs(f"{output_path}")

    for class_idx in range(1000):
        _id = 0
        num_batches = int(num_images_per_classes / batch_size)

        op_paths.build_dirs(f"{output_path}/{class_idx}")
        for _ in range(num_batches):
            class_vector = one_hot_from_int(class_idx, batch_size=batch_size)
            noise_vector = truncated_noise_sample(truncation=truncation,
                                                  batch_size=batch_size)
            noise_vector = torch.from_numpy(noise_vector).to(device)
            class_vector = torch.from_numpy(class_vector).to(device)

            # generate images
            with torch.no_grad():
                generated_images = generator_model(noise_vector, class_vector,
                                                   truncation).clamp(min=-1,
                                                                     max=1)

            for image in generated_images:
                torchvision.utils.save_image(
                    image,
                    fp=f"{output_path}/{class_idx}/{_id}",
                    format="JPEG",
                    scale_each=True,
                    normalize=True,
                )
                _id += 1
        print(f"finished {class_idx + 1}/1000.")
Example n. 21
def generate_image(thing="mushroom",
                   model_name="biggan-deep-512",
                   truncation=0.4):
    "Generate an image of *thing* from the model, save it and return the path"

    if model_name in ["waifu", "celeb"]:
        return generate_waifu(model_name, truncation)

    global img_i
    model = get_model(model_name)

    # Prepare an input
    class_vector = one_hot_from_names([thing], batch_size=1)
    noise_vector = truncated_noise_sample(truncation=truncation, batch_size=1)

    # All in tensors
    noise_vector = torch.from_numpy(noise_vector)
    class_vector = torch.from_numpy(class_vector)

    # If you have a GPU, put everything on cuda
    noise_vector = noise_vector.to('cuda')
    class_vector = class_vector.to('cuda')
    model.to('cuda')

    # Generate an image
    with torch.no_grad():
        output = model(noise_vector, class_vector, truncation)

    # If you have a GPU put back on CPU
    output = output.to('cpu')
    img = convert_to_images(output)
    out = img[0]
    file_name = f"images/{img_i}.png"
    img_i += 1
    os.makedirs("images", exist_ok=True)
    out.save(file_name, 'png')
    print(
        f"Generated an image of {thing} in file {file_name} with model {model_name}"
    )
    return file_name
Example n. 22
def main():
    args = parser.parse_args()
    import logging
    logging.basicConfig(level=logging.INFO)

    # Load the pre-trained BigGAN model
    global model
    model = BigGAN.from_pretrained(args.model_dir).to('cuda')

    label_str = args.labels.strip()
    labels = [l.replace('_', ' ') for l in label_str.split(',') if len(l)>0]
    class_base_vecs = one_hot_from_names(labels)
    label_alt_str = args.labels_alt.strip()
    labels_alt = [l.replace('_', ' ') for l in label_alt_str.split(',') if len(l)>0]
    print(labels, labels_alt)
    if len(labels_alt) > 0:
        assert len(labels_alt) == len(labels)
        c1 = one_hot_from_names(labels_alt)
        class_base_vecs = args.mixture_prop * class_base_vecs + (1-args.mixture_prop) * c1

    outs = []
    labels = []
    for _ in trange(0, args.n_samples // args.batch_size):
        # Prepare an input
        cls = np.random.randint(0, class_base_vecs.shape[0], size=(args.batch_size,))
        class_vector = class_base_vecs[cls]
        noise_vector = truncated_noise_sample(
            truncation=args.truncation, batch_size=args.batch_size)
        outs.append(gen_image(
            noise_vector, class_vector, args.crop_ratio, args.truncation))
        labels.append(cls)

    outs = np.concatenate(outs)
    labels = np.concatenate(labels)
    np.savez(args.dump_dir+'.npz', args=vars(args), samples=outs, labels=labels)

    Image.fromarray(tile_images(outs[:81])).save(args.dump_dir+'.samples-81.png')
    Image.fromarray(tile_images(outs[:16])).save(args.dump_dir+'.samples-16.png')
Example n. 23
def optim_BigGAN(param):
    lr1 = 10**param[0,0]
    lr2 = 10**param[0,1]
    wd1 = 10**param[0,2]
    wd2 = 10**param[0,3]
    mom1 = param[0,4]
    mom2 = param[0,5]
    noise_init = torch.from_numpy(truncated_noise_sample(1, 128)).cuda()
    class_init = 0.06 * torch.randn(1, 128).cuda()
    alpha = 5
    class_vec = class_init.detach().clone().cuda().requires_grad_(True)
    noise_vec = noise_init.detach().clone().cuda().requires_grad_(True)
    optim1 = SGD([noise_vec], lr=lr1, weight_decay=wd1, momentum=mom1)
    optim2 = SGD([class_vec], lr=lr2, weight_decay=wd2, momentum=mom2)
    for step in range(300):
        optim1.zero_grad()
        optim2.zero_grad()
        fitimg = BGAN.generator(torch.cat((noise_vec, class_vec), dim=1), 0.7)
        fitimg = torch.clamp((1.0 + fitimg) / 2.0, 0, 1)
        dsim = alpha * ImDist(fitimg, target_tsr) + L1loss(fitimg, target_tsr)  #
        dsim.backward()
        optim1.step()
        optim2.step()
        classnorm = class_vec.norm()
        noisenorm = noise_vec.norm()
        if classnorm > 1.25:
            class_vec = (class_vec / classnorm * 0.7).detach().clone()
            optim2 = SGD([class_vec], lr=lr2, weight_decay=wd2, momentum=mom2)
            print("Class space renormalize")
        if noisenorm > 13:
            noise_vec = (noise_vec / noisenorm * 10).detach().clone()
            optim1 = SGD([noise_vec], lr=lr1, weight_decay=wd1, momentum=mom1)
            print("Noise space renormalize")
        if (step + 1) % 10 == 0:
            print("step%d loss %.2f norm: cls: %.2f nois: %.1f" % (step, dsim.item(), classnorm, noisenorm))
    imcmp = ToPILImage()(make_grid(torch.cat((fitimg, target_tsr)).cpu()))
    imcmp.save(join(savedir, "%06d_%.3f.jpg" % (np.random.randint(1000000), dsim.item())))
    return dsim.item() if not torch.isnan(dsim) else 1E6
Example n. 24
    def __getitem__(self, idx):
        if idx > self.n_iters:
            raise StopIteration

        # Sample the space
        idx = np.random.randint(0, 1000, self.in_batch)
        class_vectors = one_hot_from_int(idx, batch_size=self.in_batch)
        noise_vectors = truncated_noise_sample(truncation=self.noise,
                                               batch_size=self.in_batch)
        class_vectors = torch.tensor(class_vectors).to(config['device'])
        noise_vectors = torch.tensor(noise_vectors).to(config['device'])

        with torch.no_grad():
            output = self.gan_model(noise_vectors, class_vectors, self.noise)

        # Convert to PIL Image
        output = output.detach().cpu().numpy()
        output = np.uint8(np.clip(((output + 1) / 2.0) * 256, 0, 255))
        output = output.transpose((0, 2, 3, 1))
        images = []

        # Pre-process each image to feed them into image sentiment analysis model
        for i in range(output.shape[0]):
            cur_img = Image.fromarray(output[i])
            cur_img = self.transform_image(cur_img)
            images.append(cur_img)
        images = torch.stack(images).to(config['device'])

        # Feed-forward image sentiment analysis
        sentiment = self.sentiment_model(images)

        class_vectors = class_vectors.cpu().detach().numpy()
        noise_vectors = noise_vectors.cpu().detach().numpy()
        sentiment = sentiment.cpu().detach().numpy()

        return class_vectors, noise_vectors, sentiment
Example n. 25
    classes = [classes[s] for s in np.argsort(chromasort[:num_classes])]


# initialize first class vector
cv1 = np.zeros(1000)
for pi, p in enumerate(chromasort[:num_classes]):
    if num_classes < 12:
        cv1[classes[pi]] = chroma[p][np.min([np.where(chrow > 0)[0][0] for chrow in chroma])]
    else:
        cv1[classes[p]] = chroma[p][np.min([np.where(chrow > 0)[0][0] for chrow in chroma])]

# initialize first noise vector
nv1 = truncated_noise_sample(truncation=truncation)[0]

# initialize lists of class and noise vectors
class_vectors = [cv1]
noise_vectors = [nv1]

# initialize previous vectors (used to track the previous frame)
cvlast = cv1
nvlast = nv1

# initialize the direction of noise vector unit updates
update_dir = np.zeros(128)
for ni, n in enumerate(nv1):
    if n < 0:
        update_dir[ni] = 1
Example n. 26
import torch
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, truncated_noise_sample,
                                       save_as_images, display_in_terminal)

# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)

# Load the pre-trained BigGAN model
model = BigGAN.from_pretrained('biggan-deep-512')

# Prepare an input
truncation = 0.4
class_vector = one_hot_from_names(['soap bubble', 'coffee', 'mushroom'], batch_size=3)
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=3)

# All in tensors
noise_vector = torch.from_numpy(noise_vector)
class_vector = torch.from_numpy(class_vector)

# # If you have a GPU, put everything on cuda
# noise_vector = noise_vector.to('cuda')
# class_vector = class_vector.to('cuda')
# model.to('cuda')

# Generate an image
with torch.no_grad():
    output = model(noise_vector, class_vector, truncation)

# If you have a GPU put back on CPU
# output = output.to('cpu')
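
The snippet stops right after generation; saving works the same way as in the other examples:

# Save results as png images (writes output_0.png, output_1.png, output_2.png)
save_as_images(output)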
Example n. 27
evc_clas = torch.from_numpy(data['eigvects_clas_avg']).cuda()
evc_nois = torch.from_numpy(data['eigvects_nois_avg']).cuda()
evc_all = torch.from_numpy(data['eigvects_avg']).cuda()
#%%
from imageio import imread
target = imread("block042_thread000_gen_gen041_001030.bmp")
target_tsr = torch.from_numpy(target / 255.0).permute([2, 0, 1]).unsqueeze(0)
target_tsr = target_tsr.float().cuda()
#%%
"""
Provide a bunch of metric, 
"""
#%% Official CMAES algorithm
import cma
optimizer = cma.CMAEvolutionStrategy(128 * [0.0], 0.06)
fixnoise = truncated_noise_sample(1,128)
noise_vec = torch.from_numpy(fixnoise)
#%%
import tqdm
for i in tqdm.trange(50):
    codes = optimizer.ask()
    # boundary handling

    # evaluate the proposed codes and compute dissimilarity scores
    codes_tsr = torch.from_numpy(np.array(codes)).float()
    latent_code = torch.cat((noise_vec.repeat(18, 1), codes_tsr), dim=1).cuda()
    with torch.no_grad():
        imgs = BGAN.generator(latent_code, 0.7)
        imgs = (imgs + 1.0) / 2.0
        dsims = ImDist(imgs, target_tsr).squeeze()
        L1dsim = (imgs - target_tsr).abs().mean([1,2,3])
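    # The scraped example ends mid-loop; CMA-ES needs the scores fed back each
    # generation. A hedged completion (the dsims + L1dsim mix mirrors the
    # objective used in the other examples here):
    scores = (dsims + L1dsim).cpu().numpy()
    optimizer.tell(codes, scores.tolist())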
Example n. 28
                                evc_ctrl,
                                figdir=figdir,
                                nsamp=5,
                                titstr="%s" % modelnm,
                                savelabel=modelnm)

#%%
"""Compute layer-wise Hessian for real BigGANs"""
from pytorch_pretrained_biggan import truncated_noise_sample

BGAN = loadBigGAN()
G = BigGAN_wrapper(BGAN)
triali = 0
savedir = r"E:\OneDrive - Washington University in St. Louis\HessNetArchit\BigGAN\real_Hessians"
for triali in tqdm(range(50)):
    noisevec = torch.from_numpy(truncated_noise_sample(1, 128, 0.6)).cuda()
    feat = torch.cat(
        (noisevec, BGAN.embeddings.weight[:, triali:triali + 1].clone().T),
        dim=1)
    eigvals, eigvects, H = hessian_compute(
        G,
        feat,
        ImDist,
        hessian_method="BP",
    )
    np.savez(join(savedir, "eig_full_trial%d.npz" % (triali)),
             H=H,
             eva=eigvals,
             evc=eigvects,
             feat=feat.cpu().detach().numpy())
    feat.requires_grad_(True)
Example n. 29
def BigGAN_invert(target_tsr,
                  param,
                  basis="all",
                  maxstep=600,
                  init_code=None,
                  ckpt_steps=(50, 100, 200, 300, 400, 500),
                  savedir=savedir,
                  namestr="",
                  RND=None):
    lr = 10**param[0, 0]
    beta1 = 1 - 10**param[0, 1]  # param[1] = log10(1 - beta1)
    beta2 = 1 - 10**param[0, 2]  # param[2] = log10(1 - beta2)
    reg_w1 = 10**param[0, 3]  # param[3] = log10(noise L2 penalty weight)
    reg_w2 = 10**param[0, 4]  # param[4] = log10(class L2 penalty weight)
    sched_gamma = param[0, 5]  # multiplicative LR decay for StepLR
    if init_code is None:
        noise_init = torch.from_numpy(truncated_noise_sample(1, 128)).cuda()
        class_init = 0.06 * torch.randn(1, 128).cuda()
    elif isinstance(init_code, np.ndarray):
        code_init = torch.from_numpy(init_code)
        noise_init = code_init[:, :128]
        class_init = code_init[:, 128:]
    elif isinstance(init_code, torch.Tensor):
        noise_init = init_code[:, :128]
        class_init = init_code[:, 128:]
    else:
        raise TypeError("init_code must be None, a numpy array or a torch.Tensor")
    if basis == "all":
        latent_coef = (torch.cat((noise_init, class_init), dim=1)
                       @ evc_all).detach().clone().requires_grad_(True)
    elif basis == "sep":
        latent_coef = (torch.cat(
            (noise_init @ evc_nois, class_init @ evc_clas),
            dim=1)).detach().clone().requires_grad_(True)
    else:
        latent_coef = (torch.cat((noise_init, class_init),
                                 dim=1)).detach().clone().requires_grad_(True)
    optim = Adam([latent_coef], lr=lr, weight_decay=0, betas=(beta1, beta2))
    scheduler = torch.optim.lr_scheduler.StepLR(optim,
                                                step_size=200,
                                                gamma=sched_gamma)
    RNDid = np.random.randint(1000000) if RND is None else RND
    scores_all = []
    nos_norm = []
    cls_norm = []
    for step in range(maxstep):
        optim.zero_grad()
        if basis == "all":
            latent_code = latent_coef @ evc_all.T
        elif basis == "sep":
            latent_code = torch.cat((latent_coef[:, :128] @ evc_nois.T,
                                     latent_coef[:, 128:] @ evc_clas.T),
                                    dim=1)
        else:
            latent_code = latent_coef
        noise_vec = latent_code[:, :128]
        class_vec = latent_code[:, 128:]
        fitimg = BGAN.generator(latent_code, 0.7)
        fitimg = torch.clamp((1.0 + fitimg) / 2.0, 0, 1)
        dsim = alpha * ImDist(fitimg, target_tsr) + L1loss(fitimg,
                                                           target_tsr)  #
        loss = dsim + reg_w1 * noise_vec.pow(2).sum() + reg_w2 * class_vec.pow(
            2).sum()
        loss.backward()
        optim.step()
        scheduler.step()
        scores_all.append(dsim.item())
        nos_norm.append(noise_vec.norm().item())
        cls_norm.append(class_vec.norm().item())
        if (step + 1) % 10 == 0:
            print("step%d loss %.2f norm: cls: %.2f nois: %.1f" %
                  (step, scores_all[-1], cls_norm[-1], nos_norm[-1]))
        if (step + 1) in ckpt_steps:
            imcmp = ToPILImage()(make_grid(
                torch.cat((fitimg, target_tsr)).cpu()))
            imcmp.save(
                join(
                    savedir, "%s_H%sreg%06d_%.3f_s%d.jpg" %
                    (namestr, basis, RNDid, dsim.item(), step + 1)))

    imcmp = ToPILImage()(make_grid(torch.cat((fitimg, target_tsr)).cpu()))
    imcmp.save(
        join(
            savedir, "%s_H%sreg%06d_%.3f_final.jpg" %
            (namestr, basis, RNDid, dsim.item())))
    # imcmp.show()
    fig, ax = plt.subplots()
    ax.plot(scores_all, label="Loss")
    ax.set_ylabel("Image Dissimilarity", color="blue", fontsize=14)
    plt.legend()
    ax2 = ax.twinx()
    ax2.plot(nos_norm, color="orange", label="noise")
    ax2.plot(cls_norm, color="magenta", label="class")
    ax2.set_ylabel("L2 Norm", color="red", fontsize=14)
    plt.legend()
    plt.title("lr %.E beta1 %.3f beta2 %.3f wd_nos %.E wd_cls %.E gamma %.1f" %
              (lr, beta1, beta2, reg_w1, reg_w2, sched_gamma))
    plt.savefig(
        join(
            savedir, "%s_traj_H%sreg%06d_%.3f.jpg" %
            (namestr, basis, RNDid, dsim.item())))
    np.savez(join(savedir, "%s_code_H%sreg%06d.npz" % (
        namestr,
        basis,
        RNDid,
    )),
             dsim=dsim.item(),
             scores_all=np.array(scores_all),
             nos_norm=np.array(nos_norm),
             cls_norm=np.array(cls_norm),
             code=latent_code.detach().cpu().numpy())
    return dsim.item()
Example n. 30
exprecord = []
csr_min, csr_max = 550, 600
if len(sys.argv) > 1:
    csr_min = int(sys.argv[1])
    csr_max = int(sys.argv[2])

for imgid in tqdm(range(csr_min, csr_max)):
    print("Processing image %d" % imgid)
    imgnm = "val_crop_%08d" % imgid
    img = imread(join(imgfolder, "val_crop_%08d.JPEG" % imgid))
    target_tsr = torch.from_numpy(img / 255.0).permute([2, 0, 1]).unsqueeze(0)
    target_tsr = target_tsr.float().cuda()
    #%%
    for triali in range(5):
        RNDid = np.random.randint(1000000)
        noise_init = torch.from_numpy(truncated_noise_sample(1, 128)).cuda()
        class_init = 0.06 * torch.randn(1, 128).cuda()
        init_code = torch.cat((noise_init, class_init), 1)
        dsim_all = BigGAN_invert(
            target_tsr,
            np.array([[-1.0, -0.5, -2.44, -4.66, -3.2552, 0.4563]]),
            init_code=init_code,
            basis="all",
            maxstep=600,
            ckpt_steps=(50, 100, 200, 300, 400, 500),
            savedir=savedir,
            namestr=imgnm,
            RND=RNDid)
        dsim_sep = BigGAN_invert(target_tsr,
                                 np.array([[-1, -0.5, -2.24, -5, -3.5, 0.59]]),
                                 init_code=init_code,