Example #1
def load_test_model(model_path, opt, dummy_opt):
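    # map_location keeps every tensor on CPU, so a checkpoint saved on a GPU
    # machine can also be loaded on a CPU-only one.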
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

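    # Restore the options the model was trained with and backfill any options
    # that did not exist when this checkpoint was saved.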
    model_opt = checkpoint['opt']
    for arg in dummy_opt:
        if arg not in model_opt:
            model_opt.__dict__[arg] = dummy_opt[arg]
    for attribute in ["share_embeddings", "stateful"]:
        if not hasattr(model_opt, attribute):
            model_opt.__dict__[attribute] = False

    # TODO: fix this
    if model_opt.stateful and not opt.sample:
        raise ValueError(
            'Beam search generator does not work with stateful models yet')

    mappings = read_pickle('{}/vocab.pkl'.format(model_opt.mappings))

    # mappings = read_pickle('{0}/{1}/vocab.pkl'.format(model_opt.mappings, model_opt.model))
    mappings = make_model_mappings(model_opt.model, mappings)

    model = make_base_model(model_opt, mappings, use_gpu(opt), checkpoint)
    model.eval()
    model.generator.eval()
    return mappings, model, model_opt
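A minimal sketch of how this loader might be called; the checkpoint path and the opt/dummy_opt values below are placeholders, not part of the project (Example #5 shows a real call site):

from argparse import Namespace

opt = Namespace(sample=True, gpuid=[])           # stand-in for parsed CLI args
dummy_opt = {'share_embeddings': False, 'stateful': False}
mappings, model, model_opt = load_test_model(
    'checkpoints/model_best.pt', opt, dummy_opt)  # placeholder checkpoint path
tgt_vocab = mappings['tgt_vocab']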
Example #2
def load_mappings(self, model_type, mappings_path, schema, preprocessor):
    vocab_path = os.path.join(mappings_path, 'vocab.pkl')
    if not os.path.exists(vocab_path):
        print('Vocab not found at {}'.format(vocab_path))
        mappings = create_mappings(self.dialogues['train'], schema,
            preprocessor.entity_forms.values())
        write_pickle(mappings, vocab_path)
        print('Wrote mappings to {}.'.format(vocab_path))
    else:
        print('Loading vocab from {}'.format(vocab_path))
        mappings = read_pickle(vocab_path)
        for k, v in mappings.items():
            print(k, v.size)
    # Convert the vocabularies to model-specific form and return them in both branches.
    mappings = make_model_mappings(model_type, mappings)
    return mappings
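A hypothetical call, assuming a data-generator instance from the surrounding project; the model type and mappings directory are placeholders:

# 'seq2seq' and 'mappings/' are placeholder arguments; schema and preprocessor
# come from the surrounding project.
mappings = data_generator.load_mappings('seq2seq', 'mappings/', schema, preprocessor)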
Example #3
def load_test_model(model_path, opt, dummy_opt):
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    model_opt = checkpoint['opt']
    for arg in dummy_opt:
        if arg not in model_opt:
            model_opt.__dict__[arg] = dummy_opt[arg]

    mappings = read_pickle('{}/vocab.pkl'.format(model_opt.mappings))
    mappings = make_model_mappings(model_opt.model, mappings)

    model = make_base_model(model_opt, mappings, use_gpu(opt), checkpoint)
    model.eval()
    model.generator.eval()
    return mappings, model, model_opt
Example #4
def load_test_model(model_path, opt, dummy_opt):
    if model_path is not None:
        print('Load model from {}.'.format(model_path))
        checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)

        model_opt = checkpoint['opt']
        for arg in dummy_opt:
            if arg not in model_opt:
                model_opt.__dict__[arg] = dummy_opt[arg]
    else:
        print('Build model from scratch.')
        checkpoint = None
        model_opt = opt

    mappings = read_pickle('{}/vocab.pkl'.format(model_opt.mappings))

    # mappings = read_pickle('{0}/{1}/vocab.pkl'.format(model_opt.mappings, model_opt.model))
    mappings = make_model_mappings(model_opt.model, mappings)

    model, critic = make_base_model(model_opt, mappings, use_gpu(opt), checkpoint)
    model.eval()
    critic.eval()
    return mappings, model, model_opt, critic
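Unlike Examples #1 and #3, this variant can also build an untrained model when model_path is None, and it returns a critic network alongside the model. A minimal sketch of both call patterns (paths and options are placeholders):

# Load from a saved checkpoint, or pass None to build the model from scratch.
mappings, model, model_opt, critic = load_test_model(
    'checkpoints/rl_model.pt', opt, dummy_opt)   # placeholder path
mappings, model, model_opt, critic = load_test_model(None, opt, dummy_opt)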
Example #5
    dummy_parser = argparse.ArgumentParser(description='duh')
    add_model_arguments(dummy_parser)
    add_data_generator_arguments(dummy_parser)
    dummy_args = dummy_parser.parse_known_args([])[0]

    if cuda.is_available() and not args.gpuid:
        print("WARNING: You have a CUDA device, should run with --gpuid 0")

    if args.gpuid:
        cuda.set_device(args.gpuid[0])

    # Load the model.
    mappings, model, model_args = \
        model_builder.load_test_model(args.checkpoint, args, dummy_args.__dict__)

    # Figure out src and tgt vocab
    make_model_mappings(model_args.model, mappings)

    schema = Schema(model_args.schema_path, None)
    data_generator = get_data_generator(args, model_args, schema, test=True)

    # Prefix: [GO]
    scorer = Scorer(args.alpha)
    generator = get_generator(model, mappings['tgt_vocab'], scorer, args,
                              model_args)
    builder = UtteranceBuilder(mappings['tgt_vocab'],
                               args.n_best,
                               has_tgt=True)
    evaluator = Evaluator(model, mappings, generator, builder, gt_prefix=1)
    evaluator.evaluate(args, model_args, data_generator)