# Example 1
dataset = Corpus()
dataset.process_data()
sos = dataset.target_dict.word2idx['<sos>']
eos = dataset.target_dict.word2idx['<eos>']
args = np.load(os.path.join(cl_args.load_path, 'args.npy')).tolist()

model = RNNModel(args).cuda()
model.eval()
if cl_args.load_path:
    file = os.path.join(cl_args.load_path, 'model.pt')
    model.load_state_dict(torch.load(file))

itr = dataset.create_epoch_iterator('test', 1)
for i in xrange(50):
    source, target = itr.next()
    output = model.sample(source, sos, eos)

    print "Source: ", ''.join([
        dataset.source_dict.idx2word[x]
        for x in source.cpu().data.numpy()[:, 0]
    ])

    print "Original: ", ''.join([
        dataset.target_dict.idx2word[x]
        for x in target.cpu().data.numpy()[:, 0]
    ])
    print "Generated: ", ''.join([
        dataset.target_dict.idx2word[x]
        for x in output.cpu().data.numpy()[:, 0]
    ])
    print "\n"
# Example 2
        dataset.source_dict.idx2word[x]
        for x in target.cpu().data.squeeze().numpy()
    ]


model = RNNModel(cf).cuda()
model.eval()
if args.load_path:
    model.load_state_dict(torch.load(os.path.join(args.load_path, 'model.pt')))

itr = dataset.create_epoch_iterator('test', 32)
target_list = []
generated_list = []
source_list = []
for i, (source, lengths, target) in enumerate(itr):
    output = model.sample(source, lengths, sos, eos)
    source_list += source.split(1, 1)
    target_list += target.split(1, 1)
    generated_list += output.split(1, 1)

count = 0
for source, target, output in zip(source_list, target_list, generated_list):
    source = source_sent2idx(source)
    target = target_sent2idx(target)
    output = target_sent2idx(output)
    print "Source: ", ''.join(source)
    print "Original: ", ''.join(target)
    print "Generated: ", ''.join(output)
    if len(target) == len(output) and (np.asarray(target)
                                       == np.asarray(output)).all():
        count += 1