Example #1
def test(args):
    # Restore the source/target vocabularies and the hyperparameters
    # saved alongside the trained model.
    source_vocab = Vocab.load(args.model_path + SRC_VOCAB_NAME)
    target_vocab = Vocab.load(args.model_path + TAR_VOCAB_NAME)
    vocab_size, hidden_size, maxout_hidden_size, embed_size = Backup.load(
        args.model_path + HPARAM_NAME)

    # Rebuild the attention-based encoder-decoder and load the weights
    # of the requested epoch (Chainer HDF5 checkpoint).
    att_encdec = ABED(vocab_size, hidden_size, maxout_hidden_size, embed_size)
    if args.use_gpu:
        att_encdec.to_gpu()
    serializers.load_hdf5(args.model_path + str(args.epochs) + '.attencdec',
                          att_encdec)

    with open(args.output + str(args.epochs), 'w') as fp:
        # Stream the test sentences and group them into minibatches,
        # sorting within chunks of 100 * minibatch sentences.
        source_gen = word_list(args.source)
        target_gen = word_list(args.target)
        batch_gen = batch(sort(source_gen, target_gen, 100 * args.minibatch),
                          args.minibatch)
        for source_batch, target_batch in batch_gen:
            source_batch = fill_batch_end(source_batch)
            target_batch = fill_batch_end(target_batch)
            # Decode either with beam search or greedily, up to args.limit tokens.
            if args.beam_search:
                hyp_batch = forward_beam(source_batch, None, source_vocab,
                                         target_vocab, att_encdec, False,
                                         args.limit, args.beam_size)
            else:
                hyp_batch = forward(source_batch, None, source_vocab,
                                    target_vocab, att_encdec, False,
                                    args.limit)
            for i, hyp in enumerate(hyp_batch):
                # Truncate each hypothesis at the first end-of-sentence token;
                # appending END first guarantees that index() succeeds.
                hyp.append(END)
                hyp = hyp[:hyp.index(END)]
                show(source_batch[i], target_batch[i], hyp, "TEST")
                fwrite(source_batch[i], target_batch[i], hyp, fp)
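For reference, a minimal sketch of how test() might be driven from the command line. The flag names and defaults below are assumptions chosen only to match the attributes that test() reads from args (model_path, epochs, source, target, output, minibatch, limit, use_gpu, beam_search, beam_size); they are not the project's actual CLI.

# Hypothetical entry point for test(); every flag name and default here
# is an assumption based on the attributes the function accesses.
import argparse

def parse_args():
    p = argparse.ArgumentParser(
        description='Decode a test set with a trained attention encoder-decoder')
    p.add_argument('--model-path', dest='model_path', required=True,
                   help='prefix under which vocabs, hyperparameters and weights were saved')
    p.add_argument('--epochs', type=int, required=True,
                   help='epoch number of the checkpoint to load')
    p.add_argument('--source', required=True, help='source-side test file')
    p.add_argument('--target', required=True, help='target-side reference file')
    p.add_argument('--output', required=True, help='prefix for the hypothesis output file')
    p.add_argument('--minibatch', type=int, default=64)
    p.add_argument('--limit', type=int, default=50,
                   help='maximum length of a generated hypothesis')
    p.add_argument('--use-gpu', dest='use_gpu', action='store_true')
    p.add_argument('--beam-search', dest='beam_search', action='store_true')
    p.add_argument('--beam-size', dest='beam_size', type=int, default=5)
    return p.parse_args()

if __name__ == '__main__':
    test(parse_args())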