Code example #1
    def __init__(self, args, schema, price_tracker, model_path, timed):
        super(PytorchNeuralSystem, self).__init__()
        self.schema = schema
        self.price_tracker = price_tracker
        self.timed_session = timed

        # TODO: do we need the dummy parser?
        dummy_parser = argparse.ArgumentParser(description='duh')
        options.add_model_arguments(dummy_parser)
        options.add_data_generator_arguments(dummy_parser)
        dummy_args = dummy_parser.parse_known_args([])[0]

        # Load the model.
        mappings, model, model_args = model_builder.load_test_model(
                model_path, args, dummy_args.__dict__)
        self.model_name = model_args.model
        vocab = mappings['utterance_vocab']
        self.mappings = mappings

        generator = get_generator(model, vocab, Scorer(args.alpha), args, model_args)
        builder = UtteranceBuilder(vocab, args.n_best, has_tgt=True)

        preprocessor = Preprocessor(schema, price_tracker, model_args.entity_encoding_form,
                model_args.entity_decoding_form, model_args.entity_target_form)
        textint_map = TextIntMap(vocab, preprocessor)
        # list(): map() is a one-shot iterator in Python 3, so materialize it for reuse
        remove_symbols = list(map(vocab.to_ind, (markers.EOS, markers.PAD)))
        use_cuda = use_gpu(args)

        kb_padding = mappings['kb_vocab'].to_ind(markers.PAD)
        dialogue_batcher = DialogueBatcherFactory.get_dialogue_batcher(model=self.model_name,
            kb_pad=kb_padding,
            mappings=mappings, num_context=model_args.num_context)

        # TODO: class variable is not a good way to do this
        Dialogue.preprocessor = preprocessor
        Dialogue.textint_map = textint_map
        Dialogue.mappings = mappings
        Dialogue.num_context = model_args.num_context

        Env = namedtuple('Env', ['model', 'vocab', 'preprocessor', 'textint_map',
            'stop_symbol', 'remove_symbols', 'gt_prefix',
            'max_len', 'dialogue_batcher', 'cuda',
            'dialogue_generator', 'utterance_builder', 'model_args'])
        self.env = Env(model, vocab, preprocessor, textint_map,
            stop_symbol=vocab.to_ind(markers.EOS), remove_symbols=remove_symbols,
            gt_prefix=1,
            max_len=20, dialogue_batcher=dialogue_batcher, cuda=use_cuda,
            dialogue_generator=generator, utterance_builder=builder, model_args=model_args)
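
The Env namedtuple built above acts as a single immutable record carrying everything a dialogue session needs at inference time (model, vocab, generator, batcher, and so on). Below is a minimal, self-contained sketch of the same pattern with hypothetical placeholder fields and values, not the real cocoa objects:

from collections import namedtuple

# Trimmed-down, hypothetical version of the Env record above: one immutable
# object bundling the pieces a session reads at inference time.
Env = namedtuple('Env', ['model', 'vocab', 'stop_symbol', 'max_len', 'cuda'])

env = Env(model=None, vocab=None, stop_symbol=2, max_len=20, cuda=False)
print(env.max_len)     # fields are read by name
# env.max_len = 50     # would raise AttributeError: namedtuple fields are read-only
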
Code example #2
    return report_stats

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--random-seed', help='Random seed', type=int, default=1)
    parser.add_argument('--test', default=False, action='store_true', help='Test mode')
    parser.add_argument('--best', default=False, action='store_true', help='Test using the best model on dev set')
    parser.add_argument('--vocab-only', default=False, action='store_true', help='Only build the vocab')
    parser.add_argument('--verbose', default=False, action='store_true', help='More prints')
    parser.add_argument('--name', default='sl', type=str, help='Name of this experiment.')

    parser.add_argument('--agent-checkpoint', type=str, default=None, help='Directory to learned models')

    options.add_data_generator_arguments(parser)
    options.add_model_arguments(parser)
    options.add_trainer_arguments(parser)
    args = parser.parse_args()

    random.seed(args.random_seed)
    model_args = args

    if torch.cuda.is_available() and not args.gpuid:
        print("WARNING: You have a CUDA device, should run with -gpuid 0")

    if args.gpuid:
        cuda.set_device(args.gpuid[0])
        if args.random_seed > 0:
            torch.cuda.manual_seed(args.random_seed)

    loading_timer = tm.time()
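
A standalone sketch of the seeding and device-selection logic above, assuming only PyTorch; the setup() wrapper and its arguments are illustrative and not part of the cocoa codebase:

import random
import torch

def setup(random_seed=1, gpuid=None):
    """Seed the Python RNG and optionally pin computation to one GPU."""
    random.seed(random_seed)
    if torch.cuda.is_available() and gpuid is None:
        print("WARNING: You have a CUDA device, should run with -gpuid 0")
    if gpuid is not None:
        torch.cuda.set_device(gpuid)
        if random_seed > 0:
            torch.cuda.manual_seed(random_seed)

setup(random_seed=1)
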
Code example #3
File: neural_system.py  Project: princeton-nlp/cocoa
    def __init__(self, args, schema, price_tracker, model_path, timed, name=None):
        super(PytorchNeuralSystem, self).__init__()
        self.schema = schema
        self.price_tracker = price_tracker
        self.timed_session = timed

        # TODO: do we need the dummy parser?
        dummy_parser = argparse.ArgumentParser(description='duh')
        options.add_model_arguments(dummy_parser)
        options.add_data_generator_arguments(dummy_parser)
        dummy_args = dummy_parser.parse_known_args([])[0]

        # Load the model.
        mappings, model, model_args, critic = rl_model_builder.load_test_model(
                model_path, args, dummy_args.__dict__)

        # Load critic from other model.
        # if name == 'tom':
        if hasattr(args, 'load_critic_from') and args.load_critic_from is not None:
            critic_path = args.load_critic_from
            _, _, _, critic = rl_model_builder.load_test_model(
                critic_path, args, dummy_args.__dict__)

        self.model_name = model_args.model
        vocab = mappings['utterance_vocab']
        # print(vocab.word_to_ind)
        self.mappings = mappings

        generator = get_generator(model, vocab, Scorer(args.alpha), args, model_args)
        builder = UtteranceBuilder(vocab, args.n_best, has_tgt=True)
        
        nlg_module = IRNLG(args)

        preprocessor = Preprocessor(schema, price_tracker, model_args.entity_encoding_form,
                model_args.entity_decoding_form, model_args.entity_target_form)
        textint_map = TextIntMap(vocab, preprocessor)
        # list(): map() is a one-shot iterator in Python 3, so materialize it for reuse
        remove_symbols = list(map(vocab.to_ind, (markers.EOS, markers.PAD)))
        use_cuda = use_gpu(args)

        kb_padding = mappings['kb_vocab'].to_ind(markers.PAD)
        # print('args: ', model_args.dia_num, model_args.state_length)
        dialogue_batcher = DialogueBatcherFactory.get_dialogue_batcher(model=self.model_name,
            kb_pad=kb_padding,
            mappings=mappings, num_context=model_args.num_context,
            dia_num=model_args.dia_num, state_length=model_args.state_length)

        # TODO: class variable is not a good way to do this
        Dialogue.preprocessor = preprocessor
        Dialogue.textint_map = textint_map
        Dialogue.mappings = mappings
        Dialogue.num_context = model_args.num_context

        Env = namedtuple('Env', ['model', 'vocab', 'preprocessor', 'textint_map',
            'stop_symbol', 'remove_symbols', 'gt_prefix',
            'max_len', 'dialogue_batcher', 'cuda',
            'dialogue_generator', 'utterance_builder', 'model_args', 'critic', 'usetom', 
            'name', 'price_strategy', 'tom_type', 'nlg_module'])
        self.env = Env(model, vocab, preprocessor, textint_map,
            stop_symbol=vocab.to_ind(markers.EOS), remove_symbols=remove_symbols,
            gt_prefix=1,
            max_len=20, dialogue_batcher=dialogue_batcher, cuda=use_cuda,
            dialogue_generator=generator, utterance_builder=builder, model_args=model_args,
            critic=critic, usetom=(name == 'tom'), name=name,
            price_strategy=args.price_strategy, tom_type=args.tom_type, nlg_module=nlg_module)
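
A minimal sketch of the optional critic-override pattern used above: load actor and critic from one checkpoint, then replace only the critic when a separate checkpoint path is given. The loader is passed in as a plain callable, and all names here are illustrative rather than the cocoa API:

def load_actor_and_critic(load_checkpoint, model_path, critic_path=None):
    """load_checkpoint(path) -> (mappings, model, model_args, critic)."""
    mappings, model, model_args, critic = load_checkpoint(model_path)
    if critic_path is not None:
        # Keep the actor, but take the critic from the other checkpoint.
        _, _, _, critic = load_checkpoint(critic_path)
    return mappings, model, model_args, critic

# Tiny demonstration with a fake loader.
def fake_load(path):
    return ({}, 'model@' + path, None, 'critic@' + path)

print(load_actor_and_critic(fake_load, 'a.pt', critic_path='b.pt'))
# ({}, 'model@a.pt', None, 'critic@b.pt')
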
Code example #4
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--random-seed',
                        help='Random seed',
                        type=int,
                        default=1)
    parser.add_argument(
        '--stats-file',
        help='Path to save json statistics (dataset, training etc.) file')
    add_data_generator_arguments(parser)
    args = parser.parse_args()

    # Know which arguments are for the model and thus should not be
    # overwritten during test.
    dummy_parser = argparse.ArgumentParser(description='duh')
    add_model_arguments(dummy_parser)
    add_data_generator_arguments(dummy_parser)
    dummy_args = dummy_parser.parse_known_args([])[0]

    if cuda.is_available() and not args.gpuid:
        print("WARNING: You have a CUDA device, should run with --gpuid 0")

    if args.gpuid:
        cuda.set_device(args.gpuid[0])

    # Load the model.
    mappings, model, model_args = \
        model_builder.load_test_model(args.checkpoint, args, dummy_args.__dict__)

    # Figure out src and tgt vocab
    make_model_mappings(model_args.model, mappings)
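
A self-contained sketch of the dummy-parser trick used above: parsing an empty argument list against a second parser collects the default values of the model-side options, so the loader can tell which settings belong to the saved model and should not be overwritten by test-time flags. The two options below are stand-ins for whatever add_model_arguments actually registers:

import argparse

def model_option_defaults():
    """Collect defaults for the model-only options by parsing no arguments."""
    dummy_parser = argparse.ArgumentParser(description='duh')
    dummy_parser.add_argument('--hidden-size', type=int, default=256)   # stand-in
    dummy_parser.add_argument('--num-layers', type=int, default=2)      # stand-in
    dummy_args, _ = dummy_parser.parse_known_args([])
    return dummy_args.__dict__

print(model_option_defaults())   # {'hidden_size': 256, 'num_layers': 2}
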
Code example #5
File: evaluate.py  Project: youngornever/cocoa
import options

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--random-seed',
                        help='Random seed',
                        type=int,
                        default=1)
    options.add_data_generator_arguments(parser)
    options.add_generator_arguments(parser)
    args = parser.parse_args()

    # Know which arguments are for the model and thus should not be
    # overwritten during test.
    dummy_parser = argparse.ArgumentParser(description='duh')
    options.add_model_arguments(dummy_parser)
    options.add_data_generator_arguments(dummy_parser)
    dummy_args = dummy_parser.parse_known_args([])[0]

    if cuda.is_available() and not args.gpuid:
        print("WARNING: You have a CUDA device, should run with --gpuid 0")

    if args.gpuid:
        cuda.set_device(args.gpuid[0])

    # Load the model.
    mappings, model, model_args = \
        model_builder.load_test_model(args.checkpoint, args, dummy_args.__dict__)

    # Figure out src and tgt vocab
    make_model_mappings(model_args.model, mappings)