# Example n. 1
    captions.append(lin)

# Register every collected caption in the vocabulary so its tokens
# can later be mapped to indices.
for sentence in captions:
    vocab.add_sentence(sentence)


def tokenizer(text):  # create a tokenizer function
    """Split *text* into tokens on single space characters.

    Note: consecutive spaces produce empty-string tokens — this is the
    documented behavior of ``str.split(' ')`` with an explicit separator.
    """
    separator = ' '
    return text.split(separator)


# Read the text prompt from the command line and encode it as vocab indices.
inp_text = sys.argv[1]
print(inp_text)
tokens = tokenizer(inp_text)
# Map each token to its vocabulary index (comprehension replaces the
# manual append loop).
codes = [vocab.to_index(t) for t in tokens]
print(codes)

# Zero-padded copy of the codes, sized to the model's text_seq_len (256).
# NOTE(review): c_tokens is never passed to the generation call below —
# the unpadded `codes` list is used instead. Confirm which is intended.
c_tokens = [0] * 256  # fill to match text_seq_len
c_tokens[:len(codes)] = codes

# Build a 1 x len(codes) batch of token ids and move it to the device.
text = torch.LongTensor(codes).unsqueeze(0).to(
    device)  # a minibatch of text (numerical tokens)
# All-ones mask: every token position is attended to (no padding mask).
mask = torch.ones_like(text).bool().to(device)
# NOTE(review): the padded c_tokens built above is not used here;
# generation receives the unpadded `codes` tensor — confirm intended.
oimgs = dalle.generate_images(text, mask=mask)
ts = int(time.time())  # Unix timestamp to make the output filename unique
print(inp_text, ts)
# Save the generated image(s) under results/, tagged with the model name,
# training epoch, and timestamp; normalize=True rescales to [0, 1].
save_image(oimgs,
           'results/gendalle' + name + '_epoch_' + str(dalle_epoch) + '-' +
           str(ts) + '.png',
           normalize=True)