Example #1
    def __getitem__(self, ind):
        key = self.keys[ind]
        text_file = self.text_files[key]
        image_file = self.image_files[key]

        image = Image.open(image_file)
        # keep only the non-empty caption lines and pick one at random
        descriptions = text_file.read_text().split('\n')
        descriptions = list(filter(lambda t: len(t) > 0, descriptions))
        description = choice(descriptions)

        tokenized_text = tokenizer.tokenize(
            description, self.text_len,
            truncate_text=args.truncate_captions).squeeze(0)

        # apply the image preprocessing transform set up in __init__
        image_tensor = self.image_transform(image)
        return tokenized_text, image_tensor
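
This __getitem__ assumes an image_transform attribute built in __init__; a minimal sketch of what that transform could look like with torchvision (the crop settings here are assumptions, not taken from the example):

from torchvision import transforms as T

# hypothetical preprocessing set up in __init__: PIL image in, float tensor out
self.image_transform = T.Compose([
    T.Lambda(lambda img: img.convert('RGB')),
    T.RandomResizedCrop(image_size, scale=(0.75, 1.0), ratio=(1.0, 1.0)),
    T.ToTensor()
])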
Example #2
if vae_params is not None:
    # restore the branch implied by the dangling `else`: rebuild the trained
    # discrete VAE from the hyperparameters stored in the checkpoint
    vae = DiscreteVAE(**vae_params)
else:
    vae = VQGanVAE1024()


dalle = DALLE(vae = vae, **dalle_params).cuda()

dalle.load_state_dict(weights)

# generate images

image_size = vae.image_size

texts = args.text.split('|')

for text in tqdm(texts):
    # tokenize the current prompt; the original used args.text here, which holds
    # every prompt joined by '|', so each pass would have encoded the full string
    text_tokens = tokenizer.tokenize([text], dalle.text_seq_len).cuda()

    text_tokens = repeat(text_tokens, '() n -> b n', b = args.num_images)

    outputs = []

    for text_chunk in tqdm(text_tokens.split(args.batch_size), desc = f'generating images for - {text}'):
        output = dalle.generate_images(text_chunk, filter_thres = args.top_k)
        outputs.append(output)

    outputs = torch.cat(outputs)

    # save all images

    outputs_dir = Path(args.outputs_dir) / args.text.replace(' ', '_')
    outputs_dir.mkdir(parents = True, exist_ok = True)
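
The snippet stops right after creating the output directory; a minimal continuation for the missing save step, assuming torchvision's save_image (the per-image PNG naming is illustrative):

from torchvision.utils import save_image

for i, image in enumerate(outputs):
    # write each generated image in the batch as an individual PNG
    save_image(image, outputs_dir / f'{i}.png', normalize = True)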
Example #3
load_obj = torch.load(str(dalle_path))
dalle_params, vae_params, weights = load_obj.pop('hparams'), load_obj.pop(
    'vae_params'), load_obj.pop('weights')

dalle_params.pop('vae', None)  # cleanup later

vae = OpenAIDiscreteVAE()

dalle = DALLE(vae=vae, **dalle_params).cuda()

dalle.load_state_dict(weights)
image_size = vae.image_size

for text in tqdm(texts):
    # keep the token tensor in its own variable so the tqdm description below
    # still shows the prompt string instead of a tensor
    text_tokens = tokenizer.tokenize([inputs], dalle.text_seq_len).cuda()

    text_tokens = repeat(text_tokens, '() n -> b n', b=num_images)

    outputs = []

    for text_chunk in tqdm(text_tokens.split(batch_size),
                           desc=f'generating images for - {text}'):
        output = dalle.generate_images(text_chunk, filter_thres=top_k)
        outputs.append(output)

    outputs = torch.cat(outputs)

    # save all images
    # This should be sent from server to client
    outputs_dir = Path("./outputs") / inputs.replace(' ', '_')[:(100)]
Example #4
def tokenize(s):
    # decode a byte-encoded caption and tokenize it to a fixed length of TEXT_SEQ_LEN
    return tokenizer.tokenize(
        s.decode('utf-8'),
        TEXT_SEQ_LEN,
        truncate_text=args.truncate_captions).squeeze(0)
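
A quick usage sketch: the helper takes a byte-encoded caption (as it would arrive from a streaming dataset record) and returns a fixed-length token tensor; the caption below is purely illustrative:

caption = b'a photograph of a red apple on a wooden table'
tokens = tokenize(caption)
print(tokens.shape)  # one dimension of length TEXT_SEQ_LEN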
Example #5
dalle.load_state_dict(weights)

# generate images

image_size = vae.image_size

texts = args.text.split('|')

for j, text in tqdm(enumerate(texts)):
    if args.gentxt:
        text_tokens, gen_texts = dalle.generate_texts(text=text,
                                                      filter_thres=args.top_k)
        text = gen_texts[0]
    else:
        text_tokens = tokenizer.tokenize([text], dalle.text_seq_len).cuda()

    text_tokens = repeat(text_tokens, '() n -> b n', b=args.num_images)

    outputs = []

    for text_chunk in tqdm(text_tokens.split(args.batch_size),
                           desc=f'generating images for - {text}'):
        output = dalle.generate_images(text_chunk, filter_thres=args.top_k)
        outputs.append(output)

    outputs = torch.cat(outputs)

    # save all images
    file_name = text
    outputs_dir = Path(args.outputs_dir) / file_name.replace(' ', '_')[:100]
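
The loop relies on an argparse namespace with several flags; a hedged sketch of the arguments the snippet assumes (defaults here are illustrative only):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--text', type=str, required=True,
                    help='prompt(s) to render, separated by |')
parser.add_argument('--num_images', type=int, default=4)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--top_k', type=float, default=0.9,
                    help='passed to generate_images as filter_thres')
parser.add_argument('--outputs_dir', type=str, default='./outputs')
parser.add_argument('--gentxt', action='store_true',
                    help='generate a caption with DALLE before rendering it')
args = parser.parse_args()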